ngram
listlengths
0
67.8k
[ "not language.active: raise ConfigurationError('There is no default language active, please activate it in", "from url parsing @param url_data: dict @return Language object \"\"\" if not url_data['country_code']:", "raise ConfigurationError('There is no default language active, please activate it in admin') else:", "get_languages(): \"\"\" returns Language QuerySet or () \"\"\" languages = list(Language.objects.filter(active=True)) if len(languages)", "url_data['country_code']: language = Language.objects.get(default=True) if not language.active: raise ConfigurationError('There is no default language", "returns Language QuerySet or () \"\"\" languages = list(Language.objects.filter(active=True)) if len(languages) > 1:", "from django.http import Http404 from ..common.errors import ConfigurationError from .models import Language def", "except Language.DoesNotExist: raise Http404 return language def get_languages(): \"\"\" returns Language QuerySet or", "it in admin') else: try: language = Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise Http404 return", "django.http import Http404 from ..common.errors import ConfigurationError from .models import Language def get_language(url_data):", "language active, please activate it in admin') else: try: language = Language.objects.get(country_code=url_data['country_code']) except", "ConfigurationError from .models import Language def get_language(url_data): \"\"\" checks for language in data", "language.active: raise ConfigurationError('There is no default language active, please activate it in admin')", "@param url_data: dict @return Language object \"\"\" if not url_data['country_code']: language = Language.objects.get(default=True)", "if not url_data['country_code']: language = Language.objects.get(default=True) if not language.active: raise ConfigurationError('There is no", "raise Http404 return language def get_languages(): \"\"\" returns Language QuerySet or () \"\"\"", 
"def get_language(url_data): \"\"\" checks for language in data from url parsing @param url_data:", "else: try: language = Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise Http404 return language def get_languages():", "import Http404 from ..common.errors import ConfigurationError from .models import Language def get_language(url_data): \"\"\"", "please activate it in admin') else: try: language = Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise", "Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise Http404 return language def get_languages(): \"\"\" returns Language QuerySet", "Language QuerySet or () \"\"\" languages = list(Language.objects.filter(active=True)) if len(languages) > 1: return", "return language def get_languages(): \"\"\" returns Language QuerySet or () \"\"\" languages =", "# -*- encoding: utf-8 -*- from django.http import Http404 from ..common.errors import ConfigurationError", "try: language = Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise Http404 return language def get_languages(): \"\"\"", "default language active, please activate it in admin') else: try: language = Language.objects.get(country_code=url_data['country_code'])", "active, please activate it in admin') else: try: language = Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist:", "for language in data from url parsing @param url_data: dict @return Language object", "language = Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise Http404 return language def get_languages(): \"\"\" returns", "encoding: utf-8 -*- from django.http import Http404 from ..common.errors import ConfigurationError from .models", "from .models import Language def get_language(url_data): \"\"\" checks for language in data from", "url_data: dict 
@return Language object \"\"\" if not url_data['country_code']: language = Language.objects.get(default=True) if", "dict @return Language object \"\"\" if not url_data['country_code']: language = Language.objects.get(default=True) if not", "if not language.active: raise ConfigurationError('There is no default language active, please activate it", "<filename>django_pages/language/__init__.py # -*- encoding: utf-8 -*- from django.http import Http404 from ..common.errors import", "url parsing @param url_data: dict @return Language object \"\"\" if not url_data['country_code']: language", "= Language.objects.get(default=True) if not language.active: raise ConfigurationError('There is no default language active, please", "Http404 return language def get_languages(): \"\"\" returns Language QuerySet or () \"\"\" languages", "\"\"\" returns Language QuerySet or () \"\"\" languages = list(Language.objects.filter(active=True)) if len(languages) >", "() \"\"\" languages = list(Language.objects.filter(active=True)) if len(languages) > 1: return languages return tuple()", "data from url parsing @param url_data: dict @return Language object \"\"\" if not", "language def get_languages(): \"\"\" returns Language QuerySet or () \"\"\" languages = list(Language.objects.filter(active=True))", "utf-8 -*- from django.http import Http404 from ..common.errors import ConfigurationError from .models import", "Language object \"\"\" if not url_data['country_code']: language = Language.objects.get(default=True) if not language.active: raise", "in admin') else: try: language = Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise Http404 return language", "from ..common.errors import ConfigurationError from .models import Language def get_language(url_data): \"\"\" checks for", "object \"\"\" if not url_data['country_code']: language = Language.objects.get(default=True) if not language.active: raise ConfigurationError('There", "activate it in admin') 
else: try: language = Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise Http404", "is no default language active, please activate it in admin') else: try: language", "admin') else: try: language = Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise Http404 return language def", "def get_languages(): \"\"\" returns Language QuerySet or () \"\"\" languages = list(Language.objects.filter(active=True)) if", "QuerySet or () \"\"\" languages = list(Language.objects.filter(active=True)) if len(languages) > 1: return languages", "..common.errors import ConfigurationError from .models import Language def get_language(url_data): \"\"\" checks for language", "parsing @param url_data: dict @return Language object \"\"\" if not url_data['country_code']: language =", "Language def get_language(url_data): \"\"\" checks for language in data from url parsing @param", "-*- from django.http import Http404 from ..common.errors import ConfigurationError from .models import Language", ".models import Language def get_language(url_data): \"\"\" checks for language in data from url", "language = Language.objects.get(default=True) if not language.active: raise ConfigurationError('There is no default language active,", "Language.objects.get(default=True) if not language.active: raise ConfigurationError('There is no default language active, please activate", "\"\"\" checks for language in data from url parsing @param url_data: dict @return", "-*- encoding: utf-8 -*- from django.http import Http404 from ..common.errors import ConfigurationError from", "language in data from url parsing @param url_data: dict @return Language object \"\"\"", "ConfigurationError('There is no default language active, please activate it in admin') else: try:", "= Language.objects.get(country_code=url_data['country_code']) except Language.DoesNotExist: raise Http404 return language def get_languages(): \"\"\" returns 
Language", "or () \"\"\" languages = list(Language.objects.filter(active=True)) if len(languages) > 1: return languages return", "Language.DoesNotExist: raise Http404 return language def get_languages(): \"\"\" returns Language QuerySet or ()", "in data from url parsing @param url_data: dict @return Language object \"\"\" if", "\"\"\" if not url_data['country_code']: language = Language.objects.get(default=True) if not language.active: raise ConfigurationError('There is", "not url_data['country_code']: language = Language.objects.get(default=True) if not language.active: raise ConfigurationError('There is no default", "no default language active, please activate it in admin') else: try: language =", "import Language def get_language(url_data): \"\"\" checks for language in data from url parsing", "Http404 from ..common.errors import ConfigurationError from .models import Language def get_language(url_data): \"\"\" checks", "get_language(url_data): \"\"\" checks for language in data from url parsing @param url_data: dict", "import ConfigurationError from .models import Language def get_language(url_data): \"\"\" checks for language in", "@return Language object \"\"\" if not url_data['country_code']: language = Language.objects.get(default=True) if not language.active:", "checks for language in data from url parsing @param url_data: dict @return Language" ]
[ "contour (ndarray): The contour data center (tuple): A tuple containing the center coordinates", "\"\"\" centers = [] for contour in contours: m = cv2.moments(contour) try: x", "top left corner as value \"\"\" x, y, _, _ = cv2.boundingRect(contour) return", "containing tuples of coordinates (x, y) radius (float): the radius around a center", "y, contour). \"\"\" centers = [] for contour in contours: m = cv2.moments(contour)", "shape_factor < 0.85 and area >= thres_area: boxes.append(c) return boxes def find_center (", "contours (list): A list containing contour data Returns: A list containing the center", "1 a += 1 def dist_center_topleft ( contour: numpy.ndarray, center: tuple ) ->", "Args: contours (list): a list containing contour data Returns: A list containing the", "all but one entry in circles given by coordinates and radius Args: coords", "= a + 1 while b < len(coords): if utils.distance(coords[a][:2], coords[b][:2]) <= radius:", "data Returns: A list containing the box contours \"\"\" boxes = list() for", "< len(coords): b = a + 1 while b < len(coords): if utils.distance(coords[a][:2],", "def find_boxes ( contours: list, thres_area: int = 500, pad_ratio: float = 0.05", "list containing contour data Returns: A list containing the center coordinates and the", "(float): the radius around a center where no other center should be \"\"\"", "\"\"\" Find contours that resemble a box Args: contours (list): a list containing", "b = a + 1 while b < len(coords): if utils.distance(coords[a][:2], coords[b][:2]) <=", "= int(m[\"m01\"] / m[\"m00\"]) centers.append((x, y, contour)) except ZeroDivisionError: pass return centers def", "of the filtered image Args: image (ndarray): the filtered image Returns: A list", "mode: int = cv2.RETR_LIST, method: int = cv2.CHAIN_APPROX_SIMPLE ) -> list: \"\"\" Find", "a box Args: contours (list): a list containing contour data Returns: A list", "= list() for c in contours: area = cv2.contourArea(c) perimeter = cv2.arcLength(c, True)", 
"all contours of the filtered image Args: image (ndarray): the filtered image Returns:", "coordinates and the contour as tuples (x, y, contour). \"\"\" centers = []", "y, contour)) except ZeroDivisionError: pass return centers def filter_centers ( coords: list, radius:", "one entry in circles given by coordinates and radius Args: coords (list): a", "the contour as tuples (x, y, contour). \"\"\" centers = [] for contour", "data Returns: A list containing the center coordinates and the contour as tuples", "A list containing contour data \"\"\" cnts = cv2.findContours(image.copy(), mode, method) return imutils.grab_contours(cnts)", "while a < len(coords): b = a + 1 while b < len(coords):", "1 def dist_center_topleft ( contour: numpy.ndarray, center: tuple ) -> float: \"\"\" Calculates", "as tuples (x, y, contour). \"\"\" centers = [] for contour in contours:", "tuple containing the center coordinates (x, y) Returns: A float with the distance", "other center should be \"\"\" a = 0 while a < len(coords): b", "MIT # Copyright (c) 2019 Akumatic import cv2, imutils, numpy from . import", "boxes def find_center ( contours: list ) -> list: \"\"\" Find the center", "+= 1 def dist_center_topleft ( contour: numpy.ndarray, center: tuple ) -> float: \"\"\"", "list containing contour data \"\"\" cnts = cv2.findContours(image.copy(), mode, method) return imutils.grab_contours(cnts) def", "contours. Args: contours (list): A list containing contour data Returns: A list containing", "corner as value \"\"\" x, y, _, _ = cv2.boundingRect(contour) return utils.distance((x, y),", "-> list: \"\"\" Find the center coordinates of all given contours. 
Args: contours", "( contour: numpy.ndarray, center: tuple ) -> float: \"\"\" Calculates the distance from", "coords[b] else: b += 1 a += 1 def dist_center_topleft ( contour: numpy.ndarray,", "imutils.grab_contours(cnts) def find_boxes ( contours: list, thres_area: int = 500, pad_ratio: float =", "= cv2.contourArea(c) perimeter = cv2.arcLength(c, True) shape_factor = utils.circularity(area, perimeter) if 0.7 <", "# Contour operations # ###################### def find_contours ( image: numpy.ndarray, mode: int =", "def filter_centers ( coords: list, radius: int ): \"\"\" Removes all but one", "coordinates and radius Args: coords (list): a list containing tuples of coordinates (x,", "Copyright (c) 2019 Akumatic import cv2, imutils, numpy from . import utils ######################", "as value \"\"\" x, y, _, _ = cv2.boundingRect(contour) return utils.distance((x, y), center[:2])", "m[\"m00\"]) y = int(m[\"m01\"] / m[\"m00\"]) centers.append((x, y, contour)) except ZeroDivisionError: pass return", "dist_center_topleft ( contour: numpy.ndarray, center: tuple ) -> float: \"\"\" Calculates the distance", "find_center ( contours: list ) -> list: \"\"\" Find the center coordinates of", "( contours: list ) -> list: \"\"\" Find the center coordinates of all", "(list): a list containing tuples of coordinates (x, y) radius (float): the radius", "centers def filter_centers ( coords: list, radius: int ): \"\"\" Removes all but", "image Returns: A list containing contour data \"\"\" cnts = cv2.findContours(image.copy(), mode, method)", "Akumatic import cv2, imutils, numpy from . import utils ###################### # Contour operations", "in contours: m = cv2.moments(contour) try: x = int(m[\"m10\"] / m[\"m00\"]) y =", "list: \"\"\" Find the center coordinates of all given contours. Args: contours (list):", "2019 Akumatic import cv2, imutils, numpy from . import utils ###################### # Contour", "(c) 2019 Akumatic import cv2, imutils, numpy from . 
import utils ###################### #", "(x, y, contour). \"\"\" centers = [] for contour in contours: m =", "by coordinates and radius Args: coords (list): a list containing tuples of coordinates", "the filtered image Returns: A list containing contour data \"\"\" cnts = cv2.findContours(image.copy(),", "<reponame>Akumatic/ExamScan # SPDX-License-Identifier: MIT # Copyright (c) 2019 Akumatic import cv2, imutils, numpy", "A list containing contour data Returns: A list containing the center coordinates and", "thres_area: boxes.append(c) return boxes def find_center ( contours: list ) -> list: \"\"\"", "###################### def find_contours ( image: numpy.ndarray, mode: int = cv2.RETR_LIST, method: int =", "it's top left corner Args: contour (ndarray): The contour data center (tuple): A", "in contours: area = cv2.contourArea(c) perimeter = cv2.arcLength(c, True) shape_factor = utils.circularity(area, perimeter)", "cv2, imutils, numpy from . import utils ###################### # Contour operations # ######################", "to the top left corner as value \"\"\" x, y, _, _ =", "the filtered image Args: image (ndarray): the filtered image Returns: A list containing", "Returns: A list containing the center coordinates and the contour as tuples (x,", "data \"\"\" cnts = cv2.findContours(image.copy(), mode, method) return imutils.grab_contours(cnts) def find_boxes ( contours:", "and area >= thres_area: boxes.append(c) return boxes def find_center ( contours: list )", "containing the center coordinates and the contour as tuples (x, y, contour). 
\"\"\"", "data center (tuple): A tuple containing the center coordinates (x, y) Returns: A", "else: b += 1 a += 1 def dist_center_topleft ( contour: numpy.ndarray, center:", "y) radius (float): the radius around a center where no other center should", "method: int = cv2.CHAIN_APPROX_SIMPLE ) -> list: \"\"\" Find all contours of the", "contour data center (tuple): A tuple containing the center coordinates (x, y) Returns:", "but one entry in circles given by coordinates and radius Args: coords (list):", "contour as tuples (x, y, contour). \"\"\" centers = [] for contour in", "center where no other center should be \"\"\" a = 0 while a", "the top left corner as value \"\"\" x, y, _, _ = cv2.boundingRect(contour)", "be \"\"\" a = 0 while a < len(coords): b = a +", "except ZeroDivisionError: pass return centers def filter_centers ( coords: list, radius: int ):", "x = int(m[\"m10\"] / m[\"m00\"]) y = int(m[\"m01\"] / m[\"m00\"]) centers.append((x, y, contour))", "<= radius: del coords[b] else: b += 1 a += 1 def dist_center_topleft", "if utils.distance(coords[a][:2], coords[b][:2]) <= radius: del coords[b] else: b += 1 a +=", "box contours \"\"\" boxes = list() for c in contours: area = cv2.contourArea(c)", "int ): \"\"\" Removes all but one entry in circles given by coordinates", "(list): A list containing contour data Returns: A list containing the center coordinates", "cv2.findContours(image.copy(), mode, method) return imutils.grab_contours(cnts) def find_boxes ( contours: list, thres_area: int =", "utils.distance(coords[a][:2], coords[b][:2]) <= radius: del coords[b] else: b += 1 a += 1", "no other center should be \"\"\" a = 0 while a < len(coords):", "image: numpy.ndarray, mode: int = cv2.RETR_LIST, method: int = cv2.CHAIN_APPROX_SIMPLE ) -> list:", "contour)) except ZeroDivisionError: pass return centers def filter_centers ( coords: list, radius: int", "the center of a given contour to it's top left corner Args: contour", "\"\"\" Calculates the distance from the 
center of a given contour to it's", "contours (list): a list containing contour data Returns: A list containing the box", "contours: list, thres_area: int = 500, pad_ratio: float = 0.05 ) -> (list,", "containing the center coordinates (x, y) Returns: A float with the distance from", "( contours: list, thres_area: int = 500, pad_ratio: float = 0.05 ) ->", "centers = [] for contour in contours: m = cv2.moments(contour) try: x =", "utils ###################### # Contour operations # ###################### def find_contours ( image: numpy.ndarray, mode:", "given by coordinates and radius Args: coords (list): a list containing tuples of", ") -> (list, list): \"\"\" Find contours that resemble a box Args: contours", "= int(m[\"m10\"] / m[\"m00\"]) y = int(m[\"m01\"] / m[\"m00\"]) centers.append((x, y, contour)) except", "distance from center to the top left corner as value \"\"\" x, y,", "list() for c in contours: area = cv2.contourArea(c) perimeter = cv2.arcLength(c, True) shape_factor", "A list containing the center coordinates and the contour as tuples (x, y,", "A list containing the box contours \"\"\" boxes = list() for c in", "given contours. 
Args: contours (list): A list containing contour data Returns: A list", "A float with the distance from center to the top left corner as", "distance from the center of a given contour to it's top left corner", "(ndarray): The contour data center (tuple): A tuple containing the center coordinates (x,", "given contour to it's top left corner Args: contour (ndarray): The contour data", "center: tuple ) -> float: \"\"\" Calculates the distance from the center of", "float with the distance from center to the top left corner as value", "a += 1 def dist_center_topleft ( contour: numpy.ndarray, center: tuple ) -> float:", "###################### # Contour operations # ###################### def find_contours ( image: numpy.ndarray, mode: int", "numpy.ndarray, mode: int = cv2.RETR_LIST, method: int = cv2.CHAIN_APPROX_SIMPLE ) -> list: \"\"\"", "all given contours. Args: contours (list): A list containing contour data Returns: A", "contours \"\"\" boxes = list() for c in contours: area = cv2.contourArea(c) perimeter", "= [] for contour in contours: m = cv2.moments(contour) try: x = int(m[\"m10\"]", "that resemble a box Args: contours (list): a list containing contour data Returns:", "< 0.85 and area >= thres_area: boxes.append(c) return boxes def find_center ( contours:", "( image: numpy.ndarray, mode: int = cv2.RETR_LIST, method: int = cv2.CHAIN_APPROX_SIMPLE ) ->", "mode, method) return imutils.grab_contours(cnts) def find_boxes ( contours: list, thres_area: int = 500,", "import cv2, imutils, numpy from . import utils ###################### # Contour operations #", "for contour in contours: m = cv2.moments(contour) try: x = int(m[\"m10\"] / m[\"m00\"])", ") -> list: \"\"\" Find the center coordinates of all given contours. 
Args:", "list: \"\"\" Find all contours of the filtered image Args: image (ndarray): the", "contour: numpy.ndarray, center: tuple ) -> float: \"\"\" Calculates the distance from the", "left corner as value \"\"\" x, y, _, _ = cv2.boundingRect(contour) return utils.distance((x,", "del coords[b] else: b += 1 a += 1 def dist_center_topleft ( contour:", "contour). \"\"\" centers = [] for contour in contours: m = cv2.moments(contour) try:", "= cv2.RETR_LIST, method: int = cv2.CHAIN_APPROX_SIMPLE ) -> list: \"\"\" Find all contours", "area = cv2.contourArea(c) perimeter = cv2.arcLength(c, True) shape_factor = utils.circularity(area, perimeter) if 0.7", "def find_center ( contours: list ) -> list: \"\"\" Find the center coordinates", "containing the box contours \"\"\" boxes = list() for c in contours: area", "shape_factor = utils.circularity(area, perimeter) if 0.7 < shape_factor < 0.85 and area >=", "image (ndarray): the filtered image Returns: A list containing contour data \"\"\" cnts", "c in contours: area = cv2.contourArea(c) perimeter = cv2.arcLength(c, True) shape_factor = utils.circularity(area,", "around a center where no other center should be \"\"\" a = 0", "a + 1 while b < len(coords): if utils.distance(coords[a][:2], coords[b][:2]) <= radius: del", "Find all contours of the filtered image Args: image (ndarray): the filtered image", "of a given contour to it's top left corner Args: contour (ndarray): The", "500, pad_ratio: float = 0.05 ) -> (list, list): \"\"\" Find contours that", "image Args: image (ndarray): the filtered image Returns: A list containing contour data", "Args: coords (list): a list containing tuples of coordinates (x, y) radius (float):", "(list): a list containing contour data Returns: A list containing the box contours", "imutils, numpy from . 
import utils ###################### # Contour operations # ###################### def", "a list containing contour data Returns: A list containing the box contours \"\"\"", "0.7 < shape_factor < 0.85 and area >= thres_area: boxes.append(c) return boxes def", "Contour operations # ###################### def find_contours ( image: numpy.ndarray, mode: int = cv2.RETR_LIST,", "list containing tuples of coordinates (x, y) radius (float): the radius around a", "try: x = int(m[\"m10\"] / m[\"m00\"]) y = int(m[\"m01\"] / m[\"m00\"]) centers.append((x, y,", "thres_area: int = 500, pad_ratio: float = 0.05 ) -> (list, list): \"\"\"", "float = 0.05 ) -> (list, list): \"\"\" Find contours that resemble a", "center should be \"\"\" a = 0 while a < len(coords): b =", "Removes all but one entry in circles given by coordinates and radius Args:", "\"\"\" Find the center coordinates of all given contours. Args: contours (list): A", "with the distance from center to the top left corner as value \"\"\"", "Find contours that resemble a box Args: contours (list): a list containing contour", "def dist_center_topleft ( contour: numpy.ndarray, center: tuple ) -> float: \"\"\" Calculates the", "coordinates of all given contours. Args: contours (list): A list containing contour data", "Find the center coordinates of all given contours. 
Args: contours (list): A list", "cv2.RETR_LIST, method: int = cv2.CHAIN_APPROX_SIMPLE ) -> list: \"\"\" Find all contours of", "radius: int ): \"\"\" Removes all but one entry in circles given by", "contours that resemble a box Args: contours (list): a list containing contour data", "left corner Args: contour (ndarray): The contour data center (tuple): A tuple containing", "filter_centers ( coords: list, radius: int ): \"\"\" Removes all but one entry", "int(m[\"m01\"] / m[\"m00\"]) centers.append((x, y, contour)) except ZeroDivisionError: pass return centers def filter_centers", "centers.append((x, y, contour)) except ZeroDivisionError: pass return centers def filter_centers ( coords: list,", "< shape_factor < 0.85 and area >= thres_area: boxes.append(c) return boxes def find_center", "coordinates (x, y) radius (float): the radius around a center where no other", "boxes.append(c) return boxes def find_center ( contours: list ) -> list: \"\"\" Find", "radius (float): the radius around a center where no other center should be", "the center coordinates of all given contours. 
Args: contours (list): A list containing", "list): \"\"\" Find contours that resemble a box Args: contours (list): a list", "from center to the top left corner as value \"\"\" x, y, _,", "center of a given contour to it's top left corner Args: contour (ndarray):", "= 500, pad_ratio: float = 0.05 ) -> (list, list): \"\"\" Find contours", "\"\"\" boxes = list() for c in contours: area = cv2.contourArea(c) perimeter =", "= utils.circularity(area, perimeter) if 0.7 < shape_factor < 0.85 and area >= thres_area:", "center (tuple): A tuple containing the center coordinates (x, y) Returns: A float", "coords: list, radius: int ): \"\"\" Removes all but one entry in circles", "SPDX-License-Identifier: MIT # Copyright (c) 2019 Akumatic import cv2, imutils, numpy from .", "Returns: A list containing contour data \"\"\" cnts = cv2.findContours(image.copy(), mode, method) return", "area >= thres_area: boxes.append(c) return boxes def find_center ( contours: list ) ->", "filtered image Args: image (ndarray): the filtered image Returns: A list containing contour", "list containing the box contours \"\"\" boxes = list() for c in contours:", "boxes = list() for c in contours: area = cv2.contourArea(c) perimeter = cv2.arcLength(c,", "to it's top left corner Args: contour (ndarray): The contour data center (tuple):", "contours of the filtered image Args: image (ndarray): the filtered image Returns: A", ") -> float: \"\"\" Calculates the distance from the center of a given", "float: \"\"\" Calculates the distance from the center of a given contour to", "+= 1 a += 1 def dist_center_topleft ( contour: numpy.ndarray, center: tuple )", "list containing contour data Returns: A list containing the box contours \"\"\" boxes", "Calculates the distance from the center of a given contour to it's top", "0.85 and area >= thres_area: boxes.append(c) return boxes def find_center ( contours: list", "top left corner Args: contour (ndarray): The contour data center (tuple): A tuple", "box Args: 
contours (list): a list containing contour data Returns: A list containing", "m = cv2.moments(contour) try: x = int(m[\"m10\"] / m[\"m00\"]) y = int(m[\"m01\"] /", "return centers def filter_centers ( coords: list, radius: int ): \"\"\" Removes all", "cv2.moments(contour) try: x = int(m[\"m10\"] / m[\"m00\"]) y = int(m[\"m01\"] / m[\"m00\"]) centers.append((x,", "True) shape_factor = utils.circularity(area, perimeter) if 0.7 < shape_factor < 0.85 and area", "the distance from the center of a given contour to it's top left", "int(m[\"m10\"] / m[\"m00\"]) y = int(m[\"m01\"] / m[\"m00\"]) centers.append((x, y, contour)) except ZeroDivisionError:", "(list, list): \"\"\" Find contours that resemble a box Args: contours (list): a", "of all given contours. Args: contours (list): A list containing contour data Returns:", "tuples of coordinates (x, y) radius (float): the radius around a center where", "\"\"\" Removes all but one entry in circles given by coordinates and radius", "\"\"\" Find all contours of the filtered image Args: image (ndarray): the filtered", "contour to it's top left corner Args: contour (ndarray): The contour data center", "pad_ratio: float = 0.05 ) -> (list, list): \"\"\" Find contours that resemble", "# SPDX-License-Identifier: MIT # Copyright (c) 2019 Akumatic import cv2, imutils, numpy from", "tuple ) -> float: \"\"\" Calculates the distance from the center of a", "center coordinates and the contour as tuples (x, y, contour). 
def find_center (
        contours: list
    ) -> list:
    """ Find the center coordinates of all given contours.

    Args:
        contours (list): A list containing contour data

    Returns:
        A list containing the center coordinates and the contour as
        tuples (x, y, contour).
    """
    result = []
    for cnt in contours:
        moments = cv2.moments(cnt)
        try:
            cx = int(moments["m10"] / moments["m00"])
            cy = int(moments["m01"] / moments["m00"])
        except ZeroDivisionError:
            # m00 (contour area) is zero: degenerate contour with no
            # well-defined centroid -- skip it, as the original did.
            continue
        result.append((cx, cy, cnt))
    return result
def filter_centers (
        coords: list,
        radius: int
    ):
    """ Removes all but one entry in circles given by coordinates and radius

    Args:
        coords (list): a list containing tuples of coordinates (x, y)
        radius (float): the radius around a center where no other center
            should be
    """
    kept = []
    for entry in coords:
        # First-fit scan: an entry survives only if it is farther than
        # `radius` from every already-kept center, so earlier entries win
        # exactly as in the original delete-in-place double loop.
        if all(utils.distance(survivor[:2], entry[:2]) > radius for survivor in kept):
            kept.append(entry)
    # Mutate the caller's list in place to preserve the original contract
    # (the function returns None and filters `coords` itself).
    coords[:] = kept
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "distutils from setuptools import setup, find_packages import pathlib here = pathlib.Path(__file__).parent.resolve() # Get", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "Approved :: Apache License v2', 'Programming Language :: Python :: 3', 'Programming Language", "License. # You may obtain a copy of the License at # #", "prefer setuptools over distutils from setuptools import setup, find_packages import pathlib here =", "Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python", "'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4', install_requires=['validators'], project_urls={ 'Bug Reports': 'https://github.com/aerocyber/ShortLink/issues', 'Source': 'https://github.com/aerocyber/ShortLink', }, )", "classifiers=[ 'License :: OSI Approved :: Apache License v2', 'Programming Language :: Python", "long_description = (here / 'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0', description='Python library that act as", "3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9',", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. 
# You may obtain a copy of the License", ":: Python :: 3.9', 'Programming Language :: Python :: 3 :: Only', ],", ":: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language ::", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4', install_requires=['validators'], project_urls={ 'Bug Reports': 'https://github.com/aerocyber/ShortLink/issues', 'Source': 'https://github.com/aerocyber/ShortLink', },", "Language :: Python :: 3 :: Only', ], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4',", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language", "a stand alone link shorten utility.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License", "you may not use this file except in compliance with the License. #", "], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4', install_requires=['validators'], project_urls={ 'Bug Reports': 'https://github.com/aerocyber/ShortLink/issues', 'Source': 'https://github.com/aerocyber/ShortLink',", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "pathlib.Path(__file__).parent.resolve() # Get the long description from the README file long_description = (here", "language governing permissions and # limitations under the License. 
# Always prefer setuptools", "README file long_description = (here / 'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0', description='Python library that", "pathlib here = pathlib.Path(__file__).parent.resolve() # Get the long description from the README file", "ANY KIND, either express or implied. # See the License for the specific", "specific language governing permissions and # limitations under the License. # Always prefer", "Python :: 3 :: Only', ], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4', install_requires=['validators'], project_urls={", "v2', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6',", "stand alone link shorten utility.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License ::", "in compliance with the License. # You may obtain a copy of the", "= (here / 'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0', description='Python library that act as a", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "<reponame>aerocyber/ShortLink # Copyright 2021 aditya # # Licensed under the Apache License, Version", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. 
# You may obtain", "setup( name='ShortLink', version='0.1.0', description='Python library that act as a stand alone link shorten", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "shorten utility.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License :: OSI Approved ::", "not use this file except in compliance with the License. # You may", "'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3 ::", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language", "3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3", "version='0.1.0', description='Python library that act as a stand alone link shorten utility.', long_description=long_description,", "from the README file long_description = (here / 'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0', description='Python", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", ":: 3 :: Only', ], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4', install_requires=['validators'], project_urls={ 'Bug", "the License. 
# Always prefer setuptools over distutils from setuptools import setup, find_packages", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8',", "# Copyright 2021 aditya # # Licensed under the Apache License, Version 2.0", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "here = pathlib.Path(__file__).parent.resolve() # Get the long description from the README file long_description", "3.9', 'Programming Language :: Python :: 3 :: Only', ], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'),", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "the README file long_description = (here / 'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0', description='Python library", "url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License :: OSI Approved :: Apache License v2', 'Programming", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "3 :: Only', ], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4', install_requires=['validators'], project_urls={ 'Bug Reports':", "'Programming Language :: Python :: 3 :: Only', ], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6,", "OF ANY KIND, either express or implied. 
# See the License for the", "# Always prefer setuptools over distutils from setuptools import setup, find_packages import pathlib", ":: Apache License v2', 'Programming Language :: Python :: 3', 'Programming Language ::", "2.0 (the \"License\"); # you may not use this file except in compliance", "as a stand alone link shorten utility.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[", "# you may not use this file except in compliance with the License.", "setuptools over distutils from setuptools import setup, find_packages import pathlib here = pathlib.Path(__file__).parent.resolve()", "/ 'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0', description='Python library that act as a stand alone", ":: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python ::", "for the specific language governing permissions and # limitations under the License. #", "agreed to in writing, software # distributed under the License is distributed on", "'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming", "(here / 'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0', description='Python library that act as a stand", "Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", ":: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language ::", "setup, find_packages import pathlib here = pathlib.Path(__file__).parent.resolve() # Get the long description from", "Python :: 3.9', 'Programming Language :: Python :: 3 :: Only', ], package_dir={'':", "2021 aditya # # Licensed under the Apache License, Version 2.0 (the \"License\");", "over distutils from setuptools import setup, find_packages import pathlib here = pathlib.Path(__file__).parent.resolve() #", ":: Only', ], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4', install_requires=['validators'], project_urls={ 'Bug Reports': 'https://github.com/aerocyber/ShortLink/issues',", "OSI Approved :: Apache License v2', 'Programming Language :: Python :: 3', 'Programming", "(the \"License\"); # you may not use this file except in compliance with", "Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language", "permissions and # limitations under the License. # Always prefer setuptools over distutils", "# # Unless required by applicable law or agreed to in writing, software", "description='Python library that act as a stand alone link shorten utility.', long_description=long_description, long_description_content_type='text/markdown',", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "Apache License v2', 'Programming Language :: Python :: 3', 'Programming Language :: Python", "except in compliance with the License. 
# You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "that act as a stand alone link shorten utility.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>',", "either express or implied. # See the License for the specific language governing", "limitations under the License. # Always prefer setuptools over distutils from setuptools import", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "governing permissions and # limitations under the License. # Always prefer setuptools over", "author_email='<EMAIL>', classifiers=[ 'License :: OSI Approved :: Apache License v2', 'Programming Language ::", "Always prefer setuptools over distutils from setuptools import setup, find_packages import pathlib here", "setuptools import setup, find_packages import pathlib here = pathlib.Path(__file__).parent.resolve() # Get the long", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "find_packages import pathlib here = pathlib.Path(__file__).parent.resolve() # Get the long description from the", "library that act as a stand alone link shorten utility.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink',", "'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0', description='Python library that act as a stand alone link", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "aditya # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language", "Get the long description from the README file long_description = (here / 'README.md').read_text(encoding='utf-8')", "# Get the long description from the README file long_description = (here /", "file except in compliance with the License. 
# You may obtain a copy", "Only', ], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4', install_requires=['validators'], project_urls={ 'Bug Reports': 'https://github.com/aerocyber/ShortLink/issues', 'Source':", "the long description from the README file long_description = (here / 'README.md').read_text(encoding='utf-8') setup(", "long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License :: OSI Approved :: Apache License v2',", "import setup, find_packages import pathlib here = pathlib.Path(__file__).parent.resolve() # Get the long description", "Copyright 2021 aditya # # Licensed under the Apache License, Version 2.0 (the", "'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", ":: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python ::", ":: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language ::", "License for the specific language governing permissions and # limitations under the License.", ":: OSI Approved :: Apache License v2', 'Programming Language :: Python :: 3',", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "under the License. # Always prefer setuptools over distutils from setuptools import setup,", "import pathlib here = pathlib.Path(__file__).parent.resolve() # Get the long description from the README", ":: Python :: 3 :: Only', ], package_dir={'': 'ShortLink'}, packages=find_packages(where='ShortLink'), python_requires='>=3.6, <4', install_requires=['validators'],", "3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7',", "the License. 
# You may obtain a copy of the License at #", "Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming", "from setuptools import setup, find_packages import pathlib here = pathlib.Path(__file__).parent.resolve() # Get the", "description from the README file long_description = (here / 'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0',", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "alone link shorten utility.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License :: OSI", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "file long_description = (here / 'README.md').read_text(encoding='utf-8') setup( name='ShortLink', version='0.1.0', description='Python library that act", "required by applicable law or agreed to in writing, software # distributed under", "License v2', 'Programming Language :: Python :: 3', 'Programming Language :: Python ::", "'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming", "applicable law or agreed to in writing, software # distributed under the License", "name='ShortLink', version='0.1.0', description='Python library that act as a stand alone link shorten utility.',", "link shorten utility.', long_description=long_description, 
long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License :: OSI Approved", "author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License :: OSI Approved :: Apache License v2', 'Programming Language", "License. # Always prefer setuptools over distutils from setuptools import setup, find_packages import", "= pathlib.Path(__file__).parent.resolve() # Get the long description from the README file long_description =", "or agreed to in writing, software # distributed under the License is distributed", "and # limitations under the License. # Always prefer setuptools over distutils from", ":: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python ::", "or implied. # See the License for the specific language governing permissions and", "Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python", "Language :: Python :: 3.9', 'Programming Language :: Python :: 3 :: Only',", "the specific language governing permissions and # limitations under the License. # Always", ":: 3.9', 'Programming Language :: Python :: 3 :: Only', ], package_dir={'': 'ShortLink'},", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "long description from the README file long_description = (here / 'README.md').read_text(encoding='utf-8') setup( name='ShortLink',", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "act as a stand alone link shorten utility.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>',", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", ":: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python ::", "with the License. # You may obtain a copy of the License at", "# limitations under the License. # Always prefer setuptools over distutils from setuptools", "'License :: OSI Approved :: Apache License v2', 'Programming Language :: Python ::", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "utility.', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License :: OSI Approved :: Apache", "in writing, software # distributed under the License is distributed on an \"AS", "long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/aerocyber/ShortLink', author='<NAME>', author_email='<EMAIL>', classifiers=[ 'License :: OSI Approved :: Apache License", ":: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language ::", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "Mnew[kappa] = C[kappa][j] np.sort(Mnew) # check for convergence if np.array_equal(M, Mnew): break M", "n: raise Exception('too many medoids') # randomly initialize an array of k medoid", "{} for t in range(tmax): # determine clusters, i. e. arrays of data", "np.argmin(D[:,M], axis=1) for kappa in range(k): C[kappa] = np.where(J==kappa)[0] # update cluster medoids", "for kappa in range(k): J = np.mean(D[np.ix_(C[kappa],C[kappa])],axis=1) j = np.argmin(J) Mnew[kappa] = C[kappa][j]", "distance return D def calculate_dense_distance_matrix(traj_list): def hausdorf(traj1, traj2): d = max(directed_hausdorff(traj1, traj2)[0], directed_hausdorff(traj2,", "traj1)[0]) return d size = len(traj_list) for i in range(size): traj_list[i] = np.array(traj_list[i])", "np.copy(M) # initialize a dictionary to represent clusters C = {} for t", "#D[D == 0] = math.inf if k > n: raise Exception('too many medoids')", "threshold): def hausdorf(traj1, traj2): d = max(directed_hausdorff(traj1, traj2)[0], directed_hausdorff(traj2, traj1)[0]) return d size", "for j in range(i + 1, size): distance = hausdorf(traj_list[i], traj_list[j]) D[i, j]", "if distance < threshold: D[i, j] = distance D[j, i] = distance return", "hausdorf(traj_list[i], traj_list[j]) D[i, j] = distance D[j, i] = distance return D def", "np.argmin(J) Mnew[kappa] = C[kappa][j] np.sort(Mnew) # check for convergence if np.array_equal(M, Mnew): break", "i in range(size): for j in range(i + 1, size): distance = hausdorf(traj_list[i],", "raise Exception('too many medoids') # randomly initialize an array of k medoid indices", "lil_matrix((size, size)) for i in range(size): for j in range(i + 1, size):", "distance D[j, i] = distance return D def kMedoids(D, k, tmax=100): # determine", "D def kMedoids(D, k, tmax=100): # determine dimensions of distance matrix D m,", "size): distance = hausdorf(traj_list[i], traj_list[j]) D[i, j] = distance D[j, i] = distance", "= np.argmin(D[:,M], axis=1) for kappa in range(k): C[kappa] = 
def calculate_dense_distance_matrix(traj_list):
    """Build a dense, symmetric Hausdorff distance matrix for trajectories.

    Args:
        traj_list: list of trajectories, each a sequence of points that
            scipy's directed_hausdorff accepts (e.g. a list of (x, y) pairs).

    Returns:
        numpy.ndarray of shape (n, n) with the symmetric Hausdorff distance
        between every pair of trajectories and zeros on the diagonal.
    """
    def hausdorf(traj1, traj2):
        # Symmetric Hausdorff distance = max of the two directed distances.
        d = max(directed_hausdorff(traj1, traj2)[0], directed_hausdorff(traj2, traj1)[0])
        return d

    size = len(traj_list)
    # FIX: convert into a local list instead of mutating the caller's
    # `traj_list` in place.
    trajs = [np.array(t) for t in traj_list]
    # FIX: np.zeros, not np.empty -- the loops below only write the i < j
    # off-diagonal pairs, so np.empty left uninitialized garbage on the
    # diagonal (the self-distances, which should be 0).
    D = np.zeros((size, size))
    for i in range(size):
        for j in range(i + 1, size):
            distance = hausdorf(trajs[i], trajs[j])
            D[i, j] = distance
            D[j, i] = distance
    return D
def kMedoids(D, k, tmax=100):
    """Cluster n items into k groups with the k-medoids algorithm.

    Args:
        D: (n, n) pairwise distance matrix, indexable as D[:, M] and via
            np.ix_ (a dense numpy array works; a sparse matrix must support
            the same fancy indexing).
        k: number of medoids/clusters requested; must not exceed n.
        tmax: maximum number of assignment/update iterations.

    Returns:
        (M, C): M is the array of k medoid indices (kept sorted), C is a
        dict mapping cluster index -> numpy array of member indices.

    Raises:
        ValueError: if k is larger than the number of items.
    """
    # determine dimensions of distance matrix D
    m, n = D.shape
    #D = D.todense()
    #D[D == 0] = math.inf
    if k > n:
        # ValueError is a subclass of Exception, so existing callers that
        # caught the original bare Exception still work.
        raise ValueError('too many medoids')
    # randomly initialize an array of k medoid indices
    M = np.arange(n)
    np.random.shuffle(M)
    M = np.sort(M[:k])
    # create a copy of the array of medoid indices
    Mnew = np.copy(M)
    # initialize a dictionary to represent clusters
    C = {}
    for t in range(tmax):
        # determine clusters, i.e. arrays of data indices
        J = np.argmin(D[:, M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J == kappa)[0]
        # update cluster medoids: pick the member with the smallest mean
        # distance to the rest of its cluster
        for kappa in range(k):
            J = np.mean(D[np.ix_(C[kappa], C[kappa])], axis=1)
            j = np.argmin(J)
            Mnew[kappa] = C[kappa][j]
        # BUG FIX: the original called `np.sort(Mnew)`, which returns a
        # sorted *copy* and discards it, leaving Mnew unsorted. Since M is
        # always kept sorted, np.array_equal(M, Mnew) could then report
        # "not converged" for an identical medoid set. ndarray.sort() sorts
        # in place.
        Mnew.sort()
        # check for convergence
        if np.array_equal(M, Mnew):
            break
        M = np.copy(Mnew)
    else:
        # tmax exhausted without converging: final update of cluster
        # memberships against the last medoid set
        J = np.argmin(D[:, M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J == kappa)[0]
    # return results
    return M, C
def calculate_dense_distance_matrix(traj_list):
    """Build a dense, symmetric Hausdorff distance matrix (no threshold).

    @param traj_list: sequence of trajectories, each an (n_points, dim) array-like
    @return (size, size) numpy array with a zero diagonal
    """
    def hausdorff(a, b):
        # symmetric Hausdorff distance = max of both directed distances
        return max(directed_hausdorff(a, b)[0], directed_hausdorff(b, a)[0])

    # Convert without mutating the caller's list in place.
    trajs = [np.asarray(t) for t in traj_list]
    size = len(trajs)
    # BUG FIX: np.empty leaves the diagonal uninitialized (garbage values,
    # since the loops below never write D[i, i]); np.zeros guarantees a
    # properly zeroed diagonal.
    D = np.zeros((size, size))
    for i in range(size):
        for j in range(i + 1, size):
            d = hausdorff(trajs[i], trajs[j])
            D[i, j] = d
            D[j, i] = d
    return D
def kMedoids(D, k, tmax=100):
    """k-medoids clustering on a precomputed distance matrix.

    @param D: (n, n) symmetric distance matrix (dense numpy array)
    @param k: number of clusters/medoids
    @param tmax: maximum number of update iterations
    @return (M, C): sorted array of k medoid indices, and a dict mapping
        cluster index -> array of member data indices
    @raises Exception when k exceeds the number of points
    """
    # determine dimensions of distance matrix D
    m, n = D.shape
    if k > n:
        raise Exception('too many medoids')

    # randomly initialize an array of k medoid indices
    M = np.arange(n)
    np.random.shuffle(M)
    M = np.sort(M[:k])

    # working copy of the medoid indices
    Mnew = np.copy(M)

    # dictionary mapping cluster index -> array of member data indices
    C = {}
    for t in range(tmax):
        # assign every point to its nearest current medoid
        J = np.argmin(D[:, M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J == kappa)[0]
        # update each medoid to the cluster member with minimal mean
        # distance to the rest of its cluster
        for kappa in range(k):
            if len(C[kappa]) == 0:
                # empty cluster: keep its previous medoid instead of letting
                # np.mean/np.argmin blow up on an empty slice
                continue
            intra = np.mean(D[np.ix_(C[kappa], C[kappa])], axis=1)
            j = np.argmin(intra)
            Mnew[kappa] = C[kappa][j]
        # BUG FIX: the original called np.sort(Mnew) and discarded the sorted
        # copy, so the convergence test compared sorted M against unsorted
        # Mnew and essentially never fired. Sort in place instead.
        Mnew.sort()
        # check for convergence
        if np.array_equal(M, Mnew):
            break
        M = np.copy(Mnew)
    else:
        # tmax reached without convergence: final update of cluster memberships
        J = np.argmin(D[:, M], axis=1)
        for kappa in range(k):
            C[kappa] = np.where(J == kappa)[0]

    return M, C
[ "class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') # List of methods 'managed' by this", "def to_string(self): return str(self.val) def my_pp_func(val): if not is_cista_vector(val.type): return return CistaVectorPrinter(val) ###", "method_name == 'at': return CistaVectorWorker_at() ### XMethod cista::vector::operator[] class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker): def get_arg_types(self): return", "self.val[idx] def to_string(self): return str(self.val) def my_pp_func(val): if not is_cista_vector(val.type): return return CistaVectorPrinter(val)", "method_name): if not is_cista_vector(class_type): return None workers = [] for method in self.methods:", "vector out of bounds\") return None return self[idx] class CistaVectorPrinter: def __init__(self, val):", "None workers = [] for method in self.methods: if method.enabled: worker = method.get_worker(method_name)", "obj): return obj.type.strip_typedefs().template_argument(0) def __call__(self, this, idx): vec = CistaVector(this.dereference()) return vec.at(idx) class", "### XMethod cista::vector::operator[] class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker): def get_arg_types(self): return gdb.lookup_type('unsigned long int') def get_result_type(self,", "if method_name == 'operator[]': return CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') #", "import re import gdb.xmethod def is_cista_vector(gdb_type): return str(gdb_type.strip_typedefs().unqualified()).startswith(\"cista::basic_vector\") def is_raw_vector(gdb_type): return not str(gdb_type.strip_typedefs().template_argument(1)).startswith(\"cista::offset_ptr\")", "__getitem__(self, idx): return (self.el + idx).dereference() def at(self, idx): if (self.size < idx):", "yield '[' + str(idx) + ']', self.val[idx] def 
to_string(self): return str(self.val) def my_pp_func(val):", "this matcher self.methods = [CistaVector_at(), CistaVector_operator_brackets()] def match(self, class_type, method_name): if not is_cista_vector(class_type):", "vec = CistaVector(this.dereference()) return vec[idx] class CistaVector_operator_brackets(gdb.xmethod.XMethod): def __init__(self): gdb.xmethod.XMethod.__init__(self, 'operator[]') def get_worker(self,", "return str(gdb_type.strip_typedefs().unqualified()).startswith(\"cista::basic_vector\") def is_raw_vector(gdb_type): return not str(gdb_type.strip_typedefs().template_argument(1)).startswith(\"cista::offset_ptr\") class CistaVector: def __init__(self, val): self.val", "self.methods = [CistaVector_at(), CistaVector_operator_brackets()] def match(self, class_type, method_name): if not is_cista_vector(class_type): return None", "obj.type.strip_typedefs().template_argument(0) def __call__(self, this, idx): vec = CistaVector(this.dereference()) return vec.at(idx) class CistaVector_at(gdb.xmethod.XMethod): def", "__init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') # List of methods 'managed' by this matcher self.methods =", "return gdb.lookup_type('unsigned long int') def get_result_type(self, obj): return obj.type.strip_typedefs().template_argument(0) def __call__(self, this, idx):", "= val self.size = val['used_size_'] self.el = val['el_'] if is_raw_vector(val.type) else OffsetPointer(val['el_']) def", "return None workers = [] for method in self.methods: if method.enabled: worker =", "str(idx) + ']', self.val[idx] def to_string(self): return str(self.val) def my_pp_func(val): if not is_cista_vector(val.type):", "import gdb.xmethod def is_cista_vector(gdb_type): return str(gdb_type.strip_typedefs().unqualified()).startswith(\"cista::basic_vector\") def is_raw_vector(gdb_type): return not str(gdb_type.strip_typedefs().template_argument(1)).startswith(\"cista::offset_ptr\") class CistaVector:", "def children(self): for idx 
class CistaVectorPrinter:
    """gdb pretty-printer for cista::basic_vector values."""

    def __init__(self, val):
        self.val = CistaVector(val)

    def children(self):
        # Yield one ('[i]', element) pair per stored element.
        total = len(self.val)
        for position in range(total):
            yield '[' + str(position) + ']', self.val[position]

    def to_string(self):
        return str(self.val)


def my_pp_func(val):
    """Pretty-printer lookup hook: hand back a printer only for cista vectors."""
    if is_cista_vector(val.type):
        return CistaVectorPrinter(val)
    return
### XMethod cista::vector::at
class CistaVectorWorker_at(gdb.xmethod.XMethodWorker):
    """Worker implementing bounds-checked at() on cista vectors."""

    def get_arg_types(self):
        return gdb.lookup_type('unsigned long int')

    def get_result_type(self, obj):
        # element type is the vector's first template argument
        return obj.type.strip_typedefs().template_argument(0)

    def __call__(self, this, idx):
        wrapped = CistaVector(this.dereference())
        return wrapped.at(idx)


class CistaVector_at(gdb.xmethod.XMethod):
    """XMethod descriptor for cista::vector::at."""

    def __init__(self):
        gdb.xmethod.XMethod.__init__(self, 'at')

    def get_worker(self, method_name):
        # hand out a worker only for the method this descriptor owns
        if method_name == 'at':
            return CistaVectorWorker_at()
### XMethod cista::vector::operator[]
class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker):
    """Worker implementing unchecked operator[] on cista vectors."""

    def get_arg_types(self):
        return gdb.lookup_type('unsigned long int')

    def get_result_type(self, obj):
        # element type is the vector's first template argument
        return obj.type.strip_typedefs().template_argument(0)

    def __call__(self, this, idx):
        wrapped = CistaVector(this.dereference())
        return wrapped[idx]


class CistaVector_operator_brackets(gdb.xmethod.XMethod):
    """XMethod descriptor for cista::vector::operator[]."""

    def __init__(self):
        gdb.xmethod.XMethod.__init__(self, 'operator[]')

    def get_worker(self, method_name):
        # hand out a worker only for the method this descriptor owns
        if method_name == 'operator[]':
            return CistaVectorWorker_operator_brackets()
str(gdb_type.strip_typedefs().template_argument(1)).startswith(\"cista::offset_ptr\") class CistaVector: def __init__(self, val): self.val = val", "return str(self.val) def my_pp_func(val): if not is_cista_vector(val.type): return return CistaVectorPrinter(val) ### XMethod cista::vector::at", "get_arg_types(self): return gdb.lookup_type('unsigned long int') def get_result_type(self, obj): return obj.type.strip_typedefs().template_argument(0) def __call__(self, this,", "if not is_cista_vector(class_type): return None workers = [] for method in self.methods: if", "OffsetPointer(val['el_']) def __len__(self): return self.size def __getitem__(self, idx): return (self.el + idx).dereference() def", "return vec.at(idx) class CistaVector_at(gdb.xmethod.XMethod): def __init__(self): gdb.xmethod.XMethod.__init__(self, 'at') def get_worker(self, method_name): if method_name", "CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') # List of methods 'managed' by", "return obj.type.strip_typedefs().template_argument(0) def __call__(self, this, idx): vec = CistaVector(this.dereference()) return vec[idx] class CistaVector_operator_brackets(gdb.xmethod.XMethod):", "in self.methods: if method.enabled: worker = method.get_worker(method_name) if worker: workers.append(worker) return workers gdb.pretty_printers.append(my_pp_func)", "def __init__(self): gdb.xmethod.XMethod.__init__(self, 'operator[]') def get_worker(self, method_name): if method_name == 'operator[]': return CistaVectorWorker_operator_brackets()", "gdb.xmethod.XMethod.__init__(self, 'at') def get_worker(self, method_name): if method_name == 'at': return CistaVectorWorker_at() ### XMethod", "workers = [] for method in self.methods: if method.enabled: worker = method.get_worker(method_name) if", "CistaVector_at(gdb.xmethod.XMethod): def __init__(self): gdb.xmethod.XMethod.__init__(self, 'at') def 
get_worker(self, method_name): if method_name == 'at': return", "self[idx] class CistaVectorPrinter: def __init__(self, val): self.val = CistaVector(val) def children(self): for idx", "return vec[idx] class CistaVector_operator_brackets(gdb.xmethod.XMethod): def __init__(self): gdb.xmethod.XMethod.__init__(self, 'operator[]') def get_worker(self, method_name): if method_name", "CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') # List of methods 'managed' by this matcher", "re import gdb.xmethod def is_cista_vector(gdb_type): return str(gdb_type.strip_typedefs().unqualified()).startswith(\"cista::basic_vector\") def is_raw_vector(gdb_type): return not str(gdb_type.strip_typedefs().template_argument(1)).startswith(\"cista::offset_ptr\") class", "return return CistaVectorPrinter(val) ### XMethod cista::vector::at class CistaVectorWorker_at(gdb.xmethod.XMethodWorker): def get_arg_types(self): return gdb.lookup_type('unsigned long", "children(self): for idx in range(len(self.val)): yield '[' + str(idx) + ']', self.val[idx] def", "get_worker(self, method_name): if method_name == 'operator[]': return CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self,", "return self.size def __getitem__(self, idx): return (self.el + idx).dereference() def at(self, idx): if", "method_name): if method_name == 'at': return CistaVectorWorker_at() ### XMethod cista::vector::operator[] class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker): def", "def match(self, class_type, method_name): if not is_cista_vector(class_type): return None workers = [] for", "for method in self.methods: if method.enabled: worker = method.get_worker(method_name) if worker: workers.append(worker) return", "= CistaVector(this.dereference()) return vec[idx] class CistaVector_operator_brackets(gdb.xmethod.XMethod): def 
__init__(self): gdb.xmethod.XMethod.__init__(self, 'operator[]') def get_worker(self, method_name):", "bounds\") return None return self[idx] class CistaVectorPrinter: def __init__(self, val): self.val = CistaVector(val)", "in range(len(self.val)): yield '[' + str(idx) + ']', self.val[idx] def to_string(self): return str(self.val)", "str(gdb_type.strip_typedefs().unqualified()).startswith(\"cista::basic_vector\") def is_raw_vector(gdb_type): return not str(gdb_type.strip_typedefs().template_argument(1)).startswith(\"cista::offset_ptr\") class CistaVector: def __init__(self, val): self.val =", "__len__(self): return self.size def __getitem__(self, idx): return (self.el + idx).dereference() def at(self, idx):", "None return self[idx] class CistaVectorPrinter: def __init__(self, val): self.val = CistaVector(val) def children(self):", "method_name): if method_name == 'operator[]': return CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher')", "def get_result_type(self, obj): return obj.type.strip_typedefs().template_argument(0) def __call__(self, this, idx): vec = CistaVector(this.dereference()) return", "def my_pp_func(val): if not is_cista_vector(val.type): return return CistaVectorPrinter(val) ### XMethod cista::vector::at class CistaVectorWorker_at(gdb.xmethod.XMethodWorker):", "__init__(self, val): self.val = val self.size = val['used_size_'] self.el = val['el_'] if is_raw_vector(val.type)", "(self.el + idx).dereference() def at(self, idx): if (self.size < idx): print(\"Accessing vector out", "class_type, method_name): if not is_cista_vector(class_type): return None workers = [] for method in", "return None return self[idx] class CistaVectorPrinter: def __init__(self, val): self.val = CistaVector(val) def", "match(self, class_type, method_name): if not is_cista_vector(class_type): return None workers = [] for method", "if method_name == 'at': 
return CistaVectorWorker_at() ### XMethod cista::vector::operator[] class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker): def get_arg_types(self):", "return CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') # List of methods 'managed'", "not is_cista_vector(val.type): return return CistaVectorPrinter(val) ### XMethod cista::vector::at class CistaVectorWorker_at(gdb.xmethod.XMethodWorker): def get_arg_types(self): return", "return obj.type.strip_typedefs().template_argument(0) def __call__(self, this, idx): vec = CistaVector(this.dereference()) return vec.at(idx) class CistaVector_at(gdb.xmethod.XMethod):", "CistaVectorWorker_at(gdb.xmethod.XMethodWorker): def get_arg_types(self): return gdb.lookup_type('unsigned long int') def get_result_type(self, obj): return obj.type.strip_typedefs().template_argument(0) def", "'at': return CistaVectorWorker_at() ### XMethod cista::vector::operator[] class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker): def get_arg_types(self): return gdb.lookup_type('unsigned long", "CistaVectorPrinter(val) ### XMethod cista::vector::at class CistaVectorWorker_at(gdb.xmethod.XMethodWorker): def get_arg_types(self): return gdb.lookup_type('unsigned long int') def", "'operator[]') def get_worker(self, method_name): if method_name == 'operator[]': return CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def", "method_name == 'operator[]': return CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') # List", "[CistaVector_at(), CistaVector_operator_brackets()] def match(self, class_type, method_name): if not is_cista_vector(class_type): return None workers =", "= val['used_size_'] self.el = val['el_'] if is_raw_vector(val.type) else 
OffsetPointer(val['el_']) def __len__(self): return self.size", "gdb.xmethod.XMethod.__init__(self, 'operator[]') def get_worker(self, method_name): if method_name == 'operator[]': return CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher):", "= val['el_'] if is_raw_vector(val.type) else OffsetPointer(val['el_']) def __len__(self): return self.size def __getitem__(self, idx):", "val self.size = val['used_size_'] self.el = val['el_'] if is_raw_vector(val.type) else OffsetPointer(val['el_']) def __len__(self):", "CistaVectorWorker_at() ### XMethod cista::vector::operator[] class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker): def get_arg_types(self): return gdb.lookup_type('unsigned long int') def", "def __init__(self, val): self.val = val self.size = val['used_size_'] self.el = val['el_'] if", "cista::vector::operator[] class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker): def get_arg_types(self): return gdb.lookup_type('unsigned long int') def get_result_type(self, obj): return", "self.size def __getitem__(self, idx): return (self.el + idx).dereference() def at(self, idx): if (self.size", "def __call__(self, this, idx): vec = CistaVector(this.dereference()) return vec[idx] class CistaVector_operator_brackets(gdb.xmethod.XMethod): def __init__(self):", "self.val = val self.size = val['used_size_'] self.el = val['el_'] if is_raw_vector(val.type) else OffsetPointer(val['el_'])", "XMethod cista::vector::operator[] class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker): def get_arg_types(self): return gdb.lookup_type('unsigned long int') def get_result_type(self, obj):", "obj.type.strip_typedefs().template_argument(0) def __call__(self, this, idx): vec = CistaVector(this.dereference()) return vec[idx] class CistaVector_operator_brackets(gdb.xmethod.XMethod): def", "CistaVector(val) def children(self): for idx in range(len(self.val)): yield '[' + str(idx) + ']',", "vec = 
CistaVector(this.dereference()) return vec.at(idx) class CistaVector_at(gdb.xmethod.XMethod): def __init__(self): gdb.xmethod.XMethod.__init__(self, 'at') def get_worker(self,", "self.size = val['used_size_'] self.el = val['el_'] if is_raw_vector(val.type) else OffsetPointer(val['el_']) def __len__(self): return", "+ str(idx) + ']', self.val[idx] def to_string(self): return str(self.val) def my_pp_func(val): if not", "not is_cista_vector(class_type): return None workers = [] for method in self.methods: if method.enabled:", "class CistaVector_operator_brackets(gdb.xmethod.XMethod): def __init__(self): gdb.xmethod.XMethod.__init__(self, 'operator[]') def get_worker(self, method_name): if method_name == 'operator[]':", "idx in range(len(self.val)): yield '[' + str(idx) + ']', self.val[idx] def to_string(self): return", "self.val = CistaVector(val) def children(self): for idx in range(len(self.val)): yield '[' + str(idx)", "class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker): def get_arg_types(self): return gdb.lookup_type('unsigned long int') def get_result_type(self, obj): return obj.type.strip_typedefs().template_argument(0)", "CistaVectorPrinter: def __init__(self, val): self.val = CistaVector(val) def children(self): for idx in range(len(self.val)):", "to_string(self): return str(self.val) def my_pp_func(val): if not is_cista_vector(val.type): return return CistaVectorPrinter(val) ### XMethod", "get_worker(self, method_name): if method_name == 'at': return CistaVectorWorker_at() ### XMethod cista::vector::operator[] class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker):", "long int') def get_result_type(self, obj): return obj.type.strip_typedefs().template_argument(0) def __call__(self, this, idx): vec =", "is_raw_vector(val.type) else OffsetPointer(val['el_']) def __len__(self): return self.size def __getitem__(self, idx): return (self.el +", "### XMethod cista::vector::at class CistaVectorWorker_at(gdb.xmethod.XMethodWorker): def 
get_arg_types(self): return gdb.lookup_type('unsigned long int') def get_result_type(self,", "def __getitem__(self, idx): return (self.el + idx).dereference() def at(self, idx): if (self.size <", "'at') def get_worker(self, method_name): if method_name == 'at': return CistaVectorWorker_at() ### XMethod cista::vector::operator[]", "+ idx).dereference() def at(self, idx): if (self.size < idx): print(\"Accessing vector out of", "__init__(self, val): self.val = CistaVector(val) def children(self): for idx in range(len(self.val)): yield '['", "def get_arg_types(self): return gdb.lookup_type('unsigned long int') def get_result_type(self, obj): return obj.type.strip_typedefs().template_argument(0) def __call__(self,", "CistaVector_operator_brackets(gdb.xmethod.XMethod): def __init__(self): gdb.xmethod.XMethod.__init__(self, 'operator[]') def get_worker(self, method_name): if method_name == 'operator[]': return", "class CistaVectorPrinter: def __init__(self, val): self.val = CistaVector(val) def children(self): for idx in", "<reponame>mayhemheroes/cista import re import gdb.xmethod def is_cista_vector(gdb_type): return str(gdb_type.strip_typedefs().unqualified()).startswith(\"cista::basic_vector\") def is_raw_vector(gdb_type): return not", "is_raw_vector(gdb_type): return not str(gdb_type.strip_typedefs().template_argument(1)).startswith(\"cista::offset_ptr\") class CistaVector: def __init__(self, val): self.val = val self.size", "= CistaVector(val) def children(self): for idx in range(len(self.val)): yield '[' + str(idx) +", "def is_cista_vector(gdb_type): return str(gdb_type.strip_typedefs().unqualified()).startswith(\"cista::basic_vector\") def is_raw_vector(gdb_type): return not str(gdb_type.strip_typedefs().template_argument(1)).startswith(\"cista::offset_ptr\") class CistaVector: def __init__(self,", "'operator[]': return CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): 
gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') # List of methods", "def __init__(self, val): self.val = CistaVector(val) def children(self): for idx in range(len(self.val)): yield", "return self[idx] class CistaVectorPrinter: def __init__(self, val): self.val = CistaVector(val) def children(self): for", "def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') # List of methods 'managed' by this matcher self.methods", "val['used_size_'] self.el = val['el_'] if is_raw_vector(val.type) else OffsetPointer(val['el_']) def __len__(self): return self.size def", "idx): if (self.size < idx): print(\"Accessing vector out of bounds\") return None return", "'CistaVectorMatcher') # List of methods 'managed' by this matcher self.methods = [CistaVector_at(), CistaVector_operator_brackets()]", "print(\"Accessing vector out of bounds\") return None return self[idx] class CistaVectorPrinter: def __init__(self,", "== 'operator[]': return CistaVectorWorker_operator_brackets() class CistaVectorMatcher(gdb.xmethod.XMethodMatcher): def __init__(self): gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher') # List of", "matcher self.methods = [CistaVector_at(), CistaVector_operator_brackets()] def match(self, class_type, method_name): if not is_cista_vector(class_type): return", "def __len__(self): return self.size def __getitem__(self, idx): return (self.el + idx).dereference() def at(self,", "int') def get_result_type(self, obj): return obj.type.strip_typedefs().template_argument(0) def __call__(self, this, idx): vec = CistaVector(this.dereference())", "else OffsetPointer(val['el_']) def __len__(self): return self.size def __getitem__(self, idx): return (self.el + idx).dereference()" ]
[ "error control in numerical computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers =", "sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, description=\"Enable", "provide low-scaling algorithms as well as rigorous error control in numerical computations.\"\"\" homepage", "depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def cmake_args(self): args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in self.spec", "purpose numerical mathematics library based on multiresolution analysis and the multiwavelet basis which", "if \"+openmp\" in self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\" if \"+mpi\" in self.spec else \"OFF\"),", "in numerical computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\", \"stigrj\",", "other # Spack Project Developers. See the top-level COPYRIGHT file for details. #", "(Apache-2.0 OR MIT) from spack import * class Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation Program", "Livermore National Security, LLC and other # Spack Project Developers. 
See the top-level", "algorithms as well as rigorous error control in numerical computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\"", "= [\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c')", "OpenMP support.\") variant(\"mpi\", default=True, description=\"Enable MPI support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def", "sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, description=\"Enable OpenMP support.\") variant(\"mpi\", default=True, description=\"Enable", "top-level COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack", "homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40')", "sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, description=\"Enable OpenMP support.\")", "variant(\"openmp\", default=True, description=\"Enable OpenMP support.\") variant(\"mpi\", default=True, description=\"Enable MPI support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\",", "class Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation Program Package (MRCPP) is a general purpose numerical", "preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, description=\"Enable OpenMP support.\") variant(\"mpi\",", "= \"https://mrcpp.readthedocs.io/en/latest/\" url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0',", "sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, 
description=\"Enable OpenMP support.\") variant(\"mpi\", default=True, description=\"Enable MPI support\")", "from spack import * class Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation Program Package (MRCPP) is", "general purpose numerical mathematics library based on multiresolution analysis and the multiwavelet basis", "and the multiwavelet basis which provide low-scaling algorithms as well as rigorous error", "support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def cmake_args(self): args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if", "import * class Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation Program Package (MRCPP) is a general", "depends_on(\"eigen\") def cmake_args(self): args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in self.spec else \"OFF\"),", "def cmake_args(self): args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\"", "based on multiresolution analysis and the multiwavelet basis which provide low-scaling algorithms as", "basis which provide low-scaling algorithms as well as rigorous error control in numerical", "(MRCPP) is a general purpose numerical mathematics library based on multiresolution analysis and", "depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def cmake_args(self): args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\"", "= \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2',", "rigorous error control in numerical computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url = 
\"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers", "variant(\"mpi\", default=True, description=\"Enable MPI support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def cmake_args(self): args", "\"https://mrcpp.readthedocs.io/en/latest/\" url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2',", "when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def cmake_args(self): args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in", "cmake_args(self): args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\" if", "MultiResolution Computation Program Package (MRCPP) is a general purpose numerical mathematics library based", "mathematics library based on multiresolution analysis and the multiwavelet basis which provide low-scaling", "default=True, description=\"Enable OpenMP support.\") variant(\"mpi\", default=True, description=\"Enable MPI support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\")", "version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, description=\"Enable OpenMP support.\") variant(\"mpi\", default=True, description=\"Enable MPI support\") depends_on(\"mpi\",", "Package (MRCPP) is a general purpose numerical mathematics library based on multiresolution analysis", "spack import * class Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation Program Package (MRCPP) is a", "version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', 
sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, description=\"Enable OpenMP support.\") variant(\"mpi\", default=True,", "<reponame>xiki-tempula/spack # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack", "is a general purpose numerical mathematics library based on multiresolution analysis and the", "default=True, description=\"Enable MPI support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def cmake_args(self): args =", "analysis and the multiwavelet basis which provide low-scaling algorithms as well as rigorous", "self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\" if \"+mpi\" in self.spec else \"OFF\"), ] return args", "control in numerical computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\",", "# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Mrcpp(CMakePackage): \"\"\"The", "# Spack Project Developers. See the top-level COPYRIGHT file for details. # #", "as rigorous error control in numerical computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\"", "Developers. See the top-level COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR", "OR MIT) from spack import * class Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation Program Package", "description=\"Enable OpenMP support.\") variant(\"mpi\", default=True, description=\"Enable MPI support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\")", "[\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0',", "type=\"build\") depends_on(\"eigen\") def cmake_args(self): args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in self.spec else", "maintainers = [\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1',", "as well as rigorous error control in numerical computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url", "version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True,", "Project 
Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0", "file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import *", "well as rigorous error control in numerical computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url =", "Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation Program Package (MRCPP) is a general purpose numerical mathematics", "Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier:", "See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT)", "Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file", "* class Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation Program Package (MRCPP) is a general purpose", "low-scaling algorithms as well as rigorous error control in numerical computations.\"\"\" homepage =", "version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, description=\"Enable OpenMP", "[ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\" if \"+mpi\" in self.spec", "which provide low-scaling algorithms as well as rigorous error control in numerical computations.\"\"\"", "and other # Spack Project Developers. See the top-level COPYRIGHT file for details.", "MPI support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def cmake_args(self): args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\"", "National Security, LLC and other # Spack Project Developers. 
See the top-level COPYRIGHT", "Program Package (MRCPP) is a general purpose numerical mathematics library based on multiresolution", "the multiwavelet basis which provide low-scaling algorithms as well as rigorous error control", "Computation Program Package (MRCPP) is a general purpose numerical mathematics library based on", "url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True)", "for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class", "\"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3')", "multiwavelet basis which provide low-scaling algorithms as well as rigorous error control in", "COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import", "the top-level COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from", "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project", "library based on multiresolution analysis and the multiwavelet basis which provide low-scaling algorithms", "\"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\",", "MIT) from spack import * class Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation Program Package (MRCPP)", "\"\"\"The MultiResolution Computation Program Package (MRCPP) is a general purpose numerical mathematics library", "args = [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\" if \"+mpi\"", "description=\"Enable MPI support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def cmake_args(self): args = [", "sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, description=\"Enable OpenMP support.\") variant(\"mpi\", default=True, description=\"Enable MPI support\") depends_on(\"mpi\", when=\"+mpi\")", "on multiresolution analysis and the multiwavelet basis which provide low-scaling algorithms as well", "in self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\" if \"+mpi\" in self.spec else \"OFF\"), ] return", "\"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2', sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40') version('1.1.0', 
sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2', preferred=True) version('1.0.2', sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3') version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd')", "\"+openmp\" in self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\" if \"+mpi\" in self.spec else \"OFF\"), ]", "a general purpose numerical mathematics library based on multiresolution analysis and the multiwavelet", "# SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Mrcpp(CMakePackage): \"\"\"The MultiResolution", "Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers.", "SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Mrcpp(CMakePackage): \"\"\"The MultiResolution Computation", "Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the", "numerical computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\", \"stigrj\", \"ilfreddy\"]", "= [ \"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\" if \"+mpi\" in", "\"-DENABLE_OPENMP={0}\".format(\"ON\" if \"+openmp\" in self.spec else \"OFF\"), \"-DENABLE_MPI={0}\".format(\"ON\" if \"+mpi\" in self.spec else", "2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See", "LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for", "details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Mrcpp(CMakePackage):", "multiresolution analysis and the multiwavelet basis which provide low-scaling algorithms as well as", "computations.\"\"\" homepage = \"https://mrcpp.readthedocs.io/en/latest/\" url = \"https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz\" maintainers = [\"robertodr\", \"stigrj\", \"ilfreddy\"] version('1.2.0-alpha2',", "numerical mathematics library based on multiresolution analysis and the multiwavelet basis which provide", "support.\") variant(\"mpi\", default=True, description=\"Enable MPI support\") depends_on(\"mpi\", when=\"+mpi\") depends_on(\"cmake@3.11:\", type=\"build\") depends_on(\"eigen\") def cmake_args(self):", "version('1.0.1', sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c') version('1.0.0', sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd') variant(\"openmp\", default=True, description=\"Enable OpenMP support.\") variant(\"mpi\", default=True, description=\"Enable MPI" ]
[ "== [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() == [] assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder", "None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False),", "[] assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not in dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr: DirMngr,", "os.remove(str(config_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def tmp_git_folder() -> str: folder_name = \"/tmp/repo/.git\"", "cfg_handler.load() assert loaded == data def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0]", "3, True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", )", "not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not in dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str):", "not in dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder", "== [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder) == [] assert tmp_git_folder not in dir_mngr.list_dirs()", "dir_mngr.add(\"/tmp/foo\") == [] assert tmp_git_folder in dir_mngr.dirs def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str):", "dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or", "== [] def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == 
[tmp_git_folder] assert", "dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder) ==", "str): folder = \"/tmp/pit/\" assert dir_mngr.add(folder) == [] def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder:", "assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") == [] assert tmp_git_folder in dir_mngr.dirs def", "\"~/b/repos\", [\".git\"], 3, True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't", "dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder) == [] assert tmp_git_folder not in dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr:", "str: folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager: def test_first_save(self,", "tmp_git_folder() -> str: folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager:", "= ConfigHandler(folder_name=folder_name) yield config_handler try: os.remove(str(config_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def tmp_git_folder()", "exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"], cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path)) except FileNotFoundError: pass", "\"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager: def test_first_save(self, cfg_handler: ConfigHandler): data = {\"dirs\": {\"/tmp\": \"/tmp\"},", "not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def 
test_find_projects_v11_not_eager(self, benchmark,", "dir_mngr.dirs def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str): folder = \"/tmp/pit/\" assert dir_mngr.add(folder) ==", "def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert", "def dir_mngr() -> DirMngr: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name) yield", "tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def test_add_ignore(self,", "tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() == [] assert", "[tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder) == [] assert tmp_git_folder not in dir_mngr.list_dirs() def", "cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def cfg_handler() -> ConfigHandler: folder_name", "== {} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", )", "str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() == [] assert not", "DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder) == []", "assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't", "not 
os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v10_eager(self, benchmark,", "assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) ==", ") def test_find_projects_v11_not_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, False)", "\"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data) loaded = cfg_handler.load() assert loaded == data def test_add_dir(self,", "os.remove(str(cfg_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def cfg_handler() -> ConfigHandler: folder_name = \"/tmp/tmuxdirtest/\"", "exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager: def test_first_save(self, cfg_handler: ConfigHandler): data = {\"dirs\":", "tmuxdir.dirmngr import ConfigHandler, DirMngr import pytest @pytest.fixture def dir_mngr() -> DirMngr: folder_name =", "in dir_mngr.dirs def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str): folder = \"/tmp/pit/\" assert dir_mngr.add(folder)", "tmp_git_folder: str): folder = \"/tmp/pit/\" assert dir_mngr.add(folder) == [] def test_add_clear(self, dir_mngr: DirMngr,", "folder = \"/tmp/pit/\" assert dir_mngr.add(folder) == [] def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str):", "dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not in dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert", "tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder) == [] assert", "tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs == 
{} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {}", "{} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)", "assert dir_mngr.add(folder) == [] def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) ==", "os.makedirs(folder_name, exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager: def test_first_save(self, cfg_handler: ConfigHandler): data =", "False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v11_not_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects,", "assert dir_mngr.add(tmp_git_folder) == [] assert tmp_git_folder not in dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr: DirMngr,", "test_first_save(self, cfg_handler: ConfigHandler): data = {\"dirs\": {\"/tmp\": \"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data) loaded =", "dir_mngr() -> DirMngr: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name) yield DirMngr([],", "{}} cfg_handler.save(data) loaded = cfg_handler.load() assert loaded == data def test_add_dir(self, dir_mngr: DirMngr,", "\"ignored_dirs\": {}} cfg_handler.save(data) loaded = cfg_handler.load() assert loaded == data def test_add_dir(self, dir_mngr:", "assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder) == [] assert tmp_git_folder not", "== data def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert", "tmp_git_folder not in dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] ==", "[\".git\"], cfg_handler=cfg_handler) try: 
os.remove(str(cfg_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def cfg_handler() -> ConfigHandler:", "cfg_handler() -> ConfigHandler: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name) yield config_handler", "assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not in dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder:", "or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v11_not_eager(self, benchmark, dir_mngr: DirMngr)", "test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str): folder = \"/tmp/pit/\" assert dir_mngr.add(folder) == [] def", "@pytest.fixture def cfg_handler() -> ConfigHandler: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name)", "assert tmp_git_folder in dir_mngr.dirs def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str): folder = \"/tmp/pit/\"", "\"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name) yield config_handler try: os.remove(str(config_handler._full_path)) except FileNotFoundError: pass", "[] assert tmp_git_folder not in dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert", "dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() ==", "def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert", "cfg_handler.save(data) loaded = cfg_handler.load() assert loaded == data def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder:", "loaded = 
cfg_handler.load() assert loaded == data def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str):", "test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder)", "== {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str): assert", "dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def test_add_ignore(self, dir_mngr: DirMngr,", "dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert", "assert dir_mngr.list_dirs() == [] assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not in dir_mngr.dirs def", "yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager: def test_first_save(self, cfg_handler: ConfigHandler): data = {\"dirs\": {\"/tmp\":", "str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs == {}", "= \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name) yield config_handler try: os.remove(str(config_handler._full_path)) except FileNotFoundError:", "benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos", "assert dir_mngr.ignored_dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not", "{} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == 
{} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos", "{\"dirs\": {\"/tmp\": \"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data) loaded = cfg_handler.load() assert loaded == data", "data def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.add(\"/tmp/foo\")", "== tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def", "test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, True) @pytest.mark.skipif( not", "os.removedirs(folder_name) @pytest.fixture def cfg_handler() -> ConfigHandler: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler =", "exist\", ) def test_find_projects_v11_not_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3,", "str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder) == [] assert tmp_git_folder", "DirMngr([], [\".git\"], cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def cfg_handler() ->", "dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\",", "folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"], cfg_handler=cfg_handler) try:", "dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert 
dir_mngr.add(tmp_git_folder) == [] assert tmp_git_folder not in", "dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {}", "ConfigHandler(folder_name=folder_name) yield config_handler try: os.remove(str(config_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def tmp_git_folder() ->", "ConfigHandler: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name) yield config_handler try: os.remove(str(config_handler._full_path))", "def test_first_save(self, cfg_handler: ConfigHandler): data = {\"dirs\": {\"/tmp\": \"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data) loaded", "DirMngr: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"], cfg_handler=cfg_handler)", "os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v11_not_eager(self, benchmark, dir_mngr: DirMngr) -> None:", "== tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] ==", "[] def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder)", "= ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"], cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture", "== [] assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not in dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr:", "def cfg_handler() -> ConfigHandler: folder_name = 
\"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name) yield", "except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def tmp_git_folder() -> str: folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name,", "False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects,", "test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs", "reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\",", "def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") ==", "cfg_handler: ConfigHandler): data = {\"dirs\": {\"/tmp\": \"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data) loaded = cfg_handler.load()", "ConfigHandler, DirMngr import pytest @pytest.fixture def dir_mngr() -> DirMngr: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name,", "test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") == []", "exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name) yield config_handler try: os.remove(str(config_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture", "tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") == [] assert tmp_git_folder in", "assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def test_add_ignore(self, dir_mngr:", "= {\"dirs\": {\"/tmp\": 
\"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data) loaded = cfg_handler.load() assert loaded ==", "assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs == {} assert", "assert tmp_git_folder not in dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0]", "config_handler try: os.remove(str(config_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def tmp_git_folder() -> str: folder_name", "dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs ==", "except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def cfg_handler() -> ConfigHandler: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name,", "== [] assert tmp_git_folder not in dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str):", "cfg_handler = ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"], cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name)", "-> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\",", "dir_mngr.dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str):", "dir_mngr.list_dirs() == [] assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not in dir_mngr.dirs def test_clear_added_dirs(self,", "data = {\"dirs\": {\"/tmp\": \"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data) loaded = cfg_handler.load() assert loaded", "not os.environ.get(\"TMUXDIR_BENCH\", False), 
reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v11_not_eager(self, benchmark, dir_mngr: DirMngr) ->", "from tmuxdir.dirmngr import ConfigHandler, DirMngr import pytest @pytest.fixture def dir_mngr() -> DirMngr: folder_name", "[\".git\"], 3, True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\",", "folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name) yield config_handler try: os.remove(str(config_handler._full_path)) except", "TestDirManager: def test_first_save(self, cfg_handler: ConfigHandler): data = {\"dirs\": {\"/tmp\": \"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data)", "assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\"))", "import os from tmuxdir.dirmngr import ConfigHandler, DirMngr import pytest @pytest.fixture def dir_mngr() ->", "yield config_handler try: os.remove(str(config_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def tmp_git_folder() -> str:", "assert dir_mngr.add(\"/tmp/foo\") == [] assert tmp_git_folder in dir_mngr.dirs def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder:", "dir_mngr.ignored_dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\",", "dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.ignore(tmp_git_folder)", "os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr) 
-> None:", "DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs", "== {} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False),", "str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") == [] assert tmp_git_folder in dir_mngr.dirs", "def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder) assert", "test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs()", "\"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"], cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path)) except", "try: os.remove(str(cfg_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def cfg_handler() -> ConfigHandler: folder_name =", "dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() == [] assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not in dir_mngr.dirs", "== tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") == [] assert tmp_git_folder in dir_mngr.dirs def test_add_dir_list(self, dir_mngr:", "pass os.removedirs(folder_name) @pytest.fixture def tmp_git_folder() -> str: folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True) yield", "import ConfigHandler, DirMngr import pytest @pytest.fixture def dir_mngr() -> DirMngr: folder_name = \"/tmp/tmuxdirtest/\"", "test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str): assert 
dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs()", "os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v10_eager(self, benchmark, dir_mngr:", "config_handler = ConfigHandler(folder_name=folder_name) yield config_handler try: os.remove(str(config_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def", ") def test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, True)", "{\"/tmp\": \"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data) loaded = cfg_handler.load() assert loaded == data def", "DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs == {}", "dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder]", "dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {} @pytest.mark.skipif( not", "-> str: folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager: def", "-> DirMngr: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"],", "os from tmuxdir.dirmngr import ConfigHandler, DirMngr import pytest @pytest.fixture def dir_mngr() -> DirMngr:", "ConfigHandler): data = {\"dirs\": {\"/tmp\": \"/tmp\"}, \"ignored_dirs\": {}} cfg_handler.save(data) loaded = cfg_handler.load() assert", "dir_mngr: DirMngr, 
tmp_git_folder: str): folder = \"/tmp/pit/\" assert dir_mngr.add(folder) == [] def test_add_clear(self,", "dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY]", "str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY]", "FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def cfg_handler() -> ConfigHandler: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True)", "dir_mngr.add(folder) == [] def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder]", "assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() == [] assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not in", "assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] ==", "dir_mngr.add(tmp_git_folder) == [] assert tmp_git_folder not in dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder:", "-> ConfigHandler: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name) yield config_handler try:", "DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() == []", "not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr) ->", "benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, 
True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\"))", "doesn't exist\", ) def test_find_projects_v11_not_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"],", "@pytest.fixture def dir_mngr() -> DirMngr: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name)", "def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert", "try: os.remove(str(config_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def tmp_git_folder() -> str: folder_name =", "folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager: def test_first_save(self, cfg_handler:", "True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def", "pytest @pytest.fixture def dir_mngr() -> DirMngr: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler =", "[] assert tmp_git_folder in dir_mngr.dirs def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str): folder =", "DirMngr import pytest @pytest.fixture def dir_mngr() -> DirMngr: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True)", "== {} def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert", "@pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v11_not_eager(self,", "DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == 
tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") == [] assert tmp_git_folder", "dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") == [] assert", "def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str): folder = \"/tmp/pit/\" assert dir_mngr.add(folder) == []", "or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr)", "assert tmp_git_folder not in dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0]", "not in dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder", "pass os.removedirs(folder_name) @pytest.fixture def cfg_handler() -> ConfigHandler: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) config_handler", "tmp_git_folder not in dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] ==", "reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v11_not_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\",", "in dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert", "dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") == [] assert tmp_git_folder in dir_mngr.dirs def test_add_dir_list(self,", "= \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"], cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path))", "def tmp_git_folder() -> str: folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True) 
yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class", "= \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager: def test_first_save(self, cfg_handler: ConfigHandler):", "= \"/tmp/pit/\" assert dir_mngr.add(folder) == [] def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str): assert", "def test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, True) @pytest.mark.skipif(", "loaded == data def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder", "tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs ==", "FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def tmp_git_folder() -> str: folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True)", "== [] assert tmp_git_folder in dir_mngr.dirs def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str): folder", "class TestDirManager: def test_first_save(self, cfg_handler: ConfigHandler): data = {\"dirs\": {\"/tmp\": \"/tmp\"}, \"ignored_dirs\": {}}", "\"/tmp/pit/\" assert dir_mngr.add(folder) == [] def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)", "os.makedirs(folder_name, exist_ok=True) config_handler = ConfigHandler(folder_name=folder_name) yield config_handler try: os.remove(str(config_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name)", "assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.clear_ignored_dirs() assert dir_mngr.ignored_dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {} @pytest.mark.skipif(", "\"/tmp/repo/.git\" os.makedirs(folder_name, 
exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name) class TestDirManager: def test_first_save(self, cfg_handler: ConfigHandler): data", "DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not", "assert dir_mngr.dirs == {} assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {} def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder:", "DirMngr, tmp_git_folder: str): folder = \"/tmp/pit/\" assert dir_mngr.add(folder) == [] def test_add_clear(self, dir_mngr:", "os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v11_not_eager(self, benchmark, dir_mngr:", "assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() == [] assert not dir_mngr.clear_added_dir(\"/tmp/random/\")", "{} @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def", "tmp_git_folder assert dir_mngr.add(\"/tmp/foo\") == [] assert tmp_git_folder in dir_mngr.dirs def test_add_dir_list(self, dir_mngr: DirMngr,", "dir_mngr.dirs def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder)", "[tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() == [] assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert tmp_git_folder not", "os.makedirs(folder_name, exist_ok=True) cfg_handler = ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"], cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path)) except FileNotFoundError:", "doesn't exist\", ) def test_find_projects_v10_eager(self, 
benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"],", "os.removedirs(folder_name) class TestDirManager: def test_first_save(self, cfg_handler: ConfigHandler): data = {\"dirs\": {\"/tmp\": \"/tmp\"}, \"ignored_dirs\":", "os.removedirs(folder_name) @pytest.fixture def tmp_git_folder() -> str: folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1])", "tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.dirs == {} assert", "assert dir_mngr.ignore(tmp_git_folder) assert dir_mngr.add(tmp_git_folder) == [] assert tmp_git_folder not in dir_mngr.list_dirs() def test_clear_ignored_dirs(self,", "in dir_mngr.list_dirs() def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder assert", "dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3, True) @pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or", "@pytest.fixture def tmp_git_folder() -> str: folder_name = \"/tmp/repo/.git\" os.makedirs(folder_name, exist_ok=True) yield \"/\".join(folder_name.split(\"/\")[:-1]) os.removedirs(folder_name)", "dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.clear_added_dir(tmp_git_folder) assert dir_mngr.list_dirs() == [] assert not dir_mngr.clear_added_dir(\"/tmp/random/\") assert", "= cfg_handler.load() assert loaded == data def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str): assert", "yield DirMngr([], [\".git\"], cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path)) except FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def cfg_handler()", "ConfigHandler(folder_name=folder_name) yield DirMngr([], [\".git\"], cfg_handler=cfg_handler) try: os.remove(str(cfg_handler._full_path)) except 
FileNotFoundError: pass os.removedirs(folder_name) @pytest.fixture def", "tmp_git_folder in dir_mngr.dirs def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str): folder = \"/tmp/pit/\" assert", "import pytest @pytest.fixture def dir_mngr() -> DirMngr: folder_name = \"/tmp/tmuxdirtest/\" os.makedirs(folder_name, exist_ok=True) cfg_handler", "exist\", ) def test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr) -> None: benchmark(dir_mngr.find_projects, \"~/b/repos\", [\".git\"], 3,", "@pytest.mark.skipif( not os.path.isdir(os.path.expanduser(\"~/b/repos\")) or not os.environ.get(\"TMUXDIR_BENCH\", False), reason=\"~/b/repos doesn't exist\", ) def test_find_projects_v10_eager(self,", "{} def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder] assert dir_mngr.ignore(tmp_git_folder)", "assert loaded == data def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str): assert dir_mngr.add(tmp_git_folder)[0] ==" ]
[ "log('K8s yaml policy file generator', 'blue') kube_policy_gen = KubePolicyGen(kind, data) response = kube_policy_gen.populate_config()", "is_valid, policy = validate_yaml(response['data']) if not is_valid: return log('Error occurred: ->' + policy,", "k8s policy file are you trying to create. support type includes deployment, ingress", "def main(kind, data): log('K8s yaml policy file generator', 'blue') kube_policy_gen = KubePolicyGen(kind, data)", "import log, validate_yaml import json import click @click.command() @click.option('--kind', '-k', help='what kind of", ".util import log, validate_yaml import json import click @click.command() @click.option('--kind', '-k', help='what kind", "to create. support type includes deployment, ingress and svc.') @click.option('--data', '-d', help='Supply payload", "click.echo('successfully generate policy file') return log('Success: ->' + json.dumps(response['data']), 'green') if __name__ ==", "if response['status'] == 'error': return log('Error occurred: ->' + json.dumps(response['error']), 'red') is_valid, policy", "the policy file in jsonstring format e.g {\"name\": \"app-1\", \"version\": \"v1\"} ') def", "kind of k8s policy file are you trying to create. 
support type includes", "= kube_policy_gen.populate_config() if response['status'] == 'error': return log('Error occurred: ->' + json.dumps(response['error']), 'red')", "type includes deployment, ingress and svc.') @click.option('--data', '-d', help='Supply payload for the policy", "+ json.dumps(response['error']), 'red') is_valid, policy = validate_yaml(response['data']) if not is_valid: return log('Error occurred:", "import json import click @click.command() @click.option('--kind', '-k', help='what kind of k8s policy file", "file in jsonstring format e.g {\"name\": \"app-1\", \"version\": \"v1\"} ') def main(kind, data):", "response['status'] == 'error': return log('Error occurred: ->' + json.dumps(response['error']), 'red') is_valid, policy =", "return log('Error occurred: ->' + json.dumps(response['error']), 'red') is_valid, policy = validate_yaml(response['data']) if not", "'.yaml' build_policy = open(filename, 'w') build_policy.write(policy) build_policy.close() click.echo('successfully generate policy file') return log('Success:", "svc.') @click.option('--data', '-d', help='Supply payload for the policy file in jsonstring format e.g", "data) response = kube_policy_gen.populate_config() if response['status'] == 'error': return log('Error occurred: ->' +", "policy, 'red') filename = kube_policy_gen.kind + '.yaml' build_policy = open(filename, 'w') build_policy.write(policy) build_policy.close()", "KubePolicyGen(kind, data) response = kube_policy_gen.populate_config() if response['status'] == 'error': return log('Error occurred: ->'", "'blue') kube_policy_gen = KubePolicyGen(kind, data) response = kube_policy_gen.populate_config() if response['status'] == 'error': return", "occurred: ->' + policy, 'red') filename = kube_policy_gen.kind + '.yaml' build_policy = open(filename,", "click @click.command() @click.option('--kind', '-k', help='what kind of k8s policy file are you trying", "kube_policy_gen = KubePolicyGen(kind, data) response = 
kube_policy_gen.populate_config() if response['status'] == 'error': return log('Error", "kube_policy_gen.kind + '.yaml' build_policy = open(filename, 'w') build_policy.write(policy) build_policy.close() click.echo('successfully generate policy file')", "policy = validate_yaml(response['data']) if not is_valid: return log('Error occurred: ->' + policy, 'red')", "not is_valid: return log('Error occurred: ->' + policy, 'red') filename = kube_policy_gen.kind +", "kube_policy_gen.populate_config() if response['status'] == 'error': return log('Error occurred: ->' + json.dumps(response['error']), 'red') is_valid,", "'-k', help='what kind of k8s policy file are you trying to create. support", "e.g {\"name\": \"app-1\", \"version\": \"v1\"} ') def main(kind, data): log('K8s yaml policy file", "log, validate_yaml import json import click @click.command() @click.option('--kind', '-k', help='what kind of k8s", "are you trying to create. support type includes deployment, ingress and svc.') @click.option('--data',", "from .util import log, validate_yaml import json import click @click.command() @click.option('--kind', '-k', help='what", "response = kube_policy_gen.populate_config() if response['status'] == 'error': return log('Error occurred: ->' + json.dumps(response['error']),", "@click.command() @click.option('--kind', '-k', help='what kind of k8s policy file are you trying to", "import KubePolicyGen from .util import log, validate_yaml import json import click @click.command() @click.option('--kind',", "= open(filename, 'w') build_policy.write(policy) build_policy.close() click.echo('successfully generate policy file') return log('Success: ->' +", "build_policy.close() click.echo('successfully generate policy file') return log('Success: ->' + json.dumps(response['data']), 'green') if __name__", "{\"name\": \"app-1\", \"version\": \"v1\"} ') def main(kind, data): log('K8s yaml policy file generator',", "main(kind, data): log('K8s yaml policy file generator', 'blue') 
kube_policy_gen = KubePolicyGen(kind, data) response", "includes deployment, ingress and svc.') @click.option('--data', '-d', help='Supply payload for the policy file", "help='what kind of k8s policy file are you trying to create. support type", "help='Supply payload for the policy file in jsonstring format e.g {\"name\": \"app-1\", \"version\":", "+ policy, 'red') filename = kube_policy_gen.kind + '.yaml' build_policy = open(filename, 'w') build_policy.write(policy)", "== 'error': return log('Error occurred: ->' + json.dumps(response['error']), 'red') is_valid, policy = validate_yaml(response['data'])", "yaml policy file generator', 'blue') kube_policy_gen = KubePolicyGen(kind, data) response = kube_policy_gen.populate_config() if", "trying to create. support type includes deployment, ingress and svc.') @click.option('--data', '-d', help='Supply", "+ '.yaml' build_policy = open(filename, 'w') build_policy.write(policy) build_policy.close() click.echo('successfully generate policy file') return", "@click.option('--kind', '-k', help='what kind of k8s policy file are you trying to create.", "in jsonstring format e.g {\"name\": \"app-1\", \"version\": \"v1\"} ') def main(kind, data): log('K8s", "build_policy = open(filename, 'w') build_policy.write(policy) build_policy.close() click.echo('successfully generate policy file') return log('Success: ->'", "open(filename, 'w') build_policy.write(policy) build_policy.close() click.echo('successfully generate policy file') return log('Success: ->' + json.dumps(response['data']),", "->' + policy, 'red') filename = kube_policy_gen.kind + '.yaml' build_policy = open(filename, 'w')", "deployment, ingress and svc.') @click.option('--data', '-d', help='Supply payload for the policy file in", "jsonstring format e.g {\"name\": \"app-1\", \"version\": \"v1\"} ') def main(kind, data): log('K8s yaml", "validate_yaml import json import click @click.command() @click.option('--kind', '-k', help='what kind of k8s policy", "from 
.kubepolicygen import KubePolicyGen from .util import log, validate_yaml import json import click", "log('Error occurred: ->' + policy, 'red') filename = kube_policy_gen.kind + '.yaml' build_policy =", "file generator', 'blue') kube_policy_gen = KubePolicyGen(kind, data) response = kube_policy_gen.populate_config() if response['status'] ==", "and svc.') @click.option('--data', '-d', help='Supply payload for the policy file in jsonstring format", "') def main(kind, data): log('K8s yaml policy file generator', 'blue') kube_policy_gen = KubePolicyGen(kind,", "\"app-1\", \"version\": \"v1\"} ') def main(kind, data): log('K8s yaml policy file generator', 'blue')", "policy file are you trying to create. support type includes deployment, ingress and", "log('Error occurred: ->' + json.dumps(response['error']), 'red') is_valid, policy = validate_yaml(response['data']) if not is_valid:", "\"v1\"} ') def main(kind, data): log('K8s yaml policy file generator', 'blue') kube_policy_gen =", "json.dumps(response['error']), 'red') is_valid, policy = validate_yaml(response['data']) if not is_valid: return log('Error occurred: ->'", "'-d', help='Supply payload for the policy file in jsonstring format e.g {\"name\": \"app-1\",", "return log('Error occurred: ->' + policy, 'red') filename = kube_policy_gen.kind + '.yaml' build_policy", "<gh_stars>1-10 from .kubepolicygen import KubePolicyGen from .util import log, validate_yaml import json import", "@click.option('--data', '-d', help='Supply payload for the policy file in jsonstring format e.g {\"name\":", "policy file in jsonstring format e.g {\"name\": \"app-1\", \"version\": \"v1\"} ') def main(kind,", ".kubepolicygen import KubePolicyGen from .util import log, validate_yaml import json import click @click.command()", "payload for the policy file in jsonstring format e.g {\"name\": \"app-1\", \"version\": \"v1\"}", "policy file generator', 'blue') kube_policy_gen = KubePolicyGen(kind, data) response = 
kube_policy_gen.populate_config() if response['status']", "for the policy file in jsonstring format e.g {\"name\": \"app-1\", \"version\": \"v1\"} ')", "validate_yaml(response['data']) if not is_valid: return log('Error occurred: ->' + policy, 'red') filename =", "format e.g {\"name\": \"app-1\", \"version\": \"v1\"} ') def main(kind, data): log('K8s yaml policy", "create. support type includes deployment, ingress and svc.') @click.option('--data', '-d', help='Supply payload for", "occurred: ->' + json.dumps(response['error']), 'red') is_valid, policy = validate_yaml(response['data']) if not is_valid: return", "= kube_policy_gen.kind + '.yaml' build_policy = open(filename, 'w') build_policy.write(policy) build_policy.close() click.echo('successfully generate policy", "data): log('K8s yaml policy file generator', 'blue') kube_policy_gen = KubePolicyGen(kind, data) response =", "you trying to create. support type includes deployment, ingress and svc.') @click.option('--data', '-d',", "build_policy.write(policy) build_policy.close() click.echo('successfully generate policy file') return log('Success: ->' + json.dumps(response['data']), 'green') if", "import click @click.command() @click.option('--kind', '-k', help='what kind of k8s policy file are you", "file are you trying to create. 
support type includes deployment, ingress and svc.')", "generate policy file') return log('Success: ->' + json.dumps(response['data']), 'green') if __name__ == '__main__':", "generator', 'blue') kube_policy_gen = KubePolicyGen(kind, data) response = kube_policy_gen.populate_config() if response['status'] == 'error':", "KubePolicyGen from .util import log, validate_yaml import json import click @click.command() @click.option('--kind', '-k',", "json import click @click.command() @click.option('--kind', '-k', help='what kind of k8s policy file are", "'red') is_valid, policy = validate_yaml(response['data']) if not is_valid: return log('Error occurred: ->' +", "is_valid: return log('Error occurred: ->' + policy, 'red') filename = kube_policy_gen.kind + '.yaml'", "filename = kube_policy_gen.kind + '.yaml' build_policy = open(filename, 'w') build_policy.write(policy) build_policy.close() click.echo('successfully generate", "if not is_valid: return log('Error occurred: ->' + policy, 'red') filename = kube_policy_gen.kind", "'red') filename = kube_policy_gen.kind + '.yaml' build_policy = open(filename, 'w') build_policy.write(policy) build_policy.close() click.echo('successfully", "= KubePolicyGen(kind, data) response = kube_policy_gen.populate_config() if response['status'] == 'error': return log('Error occurred:", "->' + json.dumps(response['error']), 'red') is_valid, policy = validate_yaml(response['data']) if not is_valid: return log('Error", "of k8s policy file are you trying to create. 
support type includes deployment,", "policy file') return log('Success: ->' + json.dumps(response['data']), 'green') if __name__ == '__main__': main(prog_name=\"kubegen\")", "support type includes deployment, ingress and svc.') @click.option('--data', '-d', help='Supply payload for the", "'w') build_policy.write(policy) build_policy.close() click.echo('successfully generate policy file') return log('Success: ->' + json.dumps(response['data']), 'green')", "ingress and svc.') @click.option('--data', '-d', help='Supply payload for the policy file in jsonstring", "\"version\": \"v1\"} ') def main(kind, data): log('K8s yaml policy file generator', 'blue') kube_policy_gen", "= validate_yaml(response['data']) if not is_valid: return log('Error occurred: ->' + policy, 'red') filename", "'error': return log('Error occurred: ->' + json.dumps(response['error']), 'red') is_valid, policy = validate_yaml(response['data']) if" ]
[ "License is distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR", "= \"hello\" auth = GrpcAuth(key=key) assert key == auth._key def test_call(self): key =", "test_call(self): key = \"hello\" auth = GrpcAuth(key=key) context_mock = MagicMock() callback_mock = MagicMock()", "distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF", "KIND, either express or implied. # # See the License for the specific", "express or implied. # # See the License for the specific language governing", "License. # ################################################################################# from unittest import mock, TestCase from unittest.mock import patch, MagicMock", "copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # #", "the License. # ################################################################################# from unittest import mock, TestCase from unittest.mock import patch,", "Unless required by applicable law or agreed to in writing, software # #", "in compliance with the License. # # You may obtain a copy of", "implied. # # See the License for the specific language governing permissions and", "GrpcAuth class GrpcAuthTest(TestCase): def setUp(self) -> None: pass def test_initialize(self): key = \"hello\"", "Rights Reserved. # # # # Licensed under the Apache License, Version 2.0", "\"hello\" auth = GrpcAuth(key=key) assert key == auth._key def test_call(self): key = \"hello\"", "is distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS", "(the \"License\"). # # You may not use this file except in compliance", "permissions and # # limitations under the License. # ################################################################################# from unittest import", "not use this file except in compliance with the License. 
# # You", "GrpcAuth(key=key) assert key == auth._key def test_call(self): key = \"hello\" auth = GrpcAuth(key=key)", "mock, TestCase from unittest.mock import patch, MagicMock from ude.communication.grpc_auth import GrpcAuth class GrpcAuthTest(TestCase):", "# # # # Licensed under the Apache License, Version 2.0 (the \"License\").", "key = \"hello\" auth = GrpcAuth(key=key) assert key == auth._key def test_call(self): key", "All Rights Reserved. # # # # Licensed under the Apache License, Version", "import GrpcAuth class GrpcAuthTest(TestCase): def setUp(self) -> None: pass def test_initialize(self): key =", "License. # # You may obtain a copy of the License at #", "law or agreed to in writing, software # # distributed under the License", "patch, MagicMock from ude.communication.grpc_auth import GrpcAuth class GrpcAuthTest(TestCase): def setUp(self) -> None: pass", "class GrpcAuthTest(TestCase): def setUp(self) -> None: pass def test_initialize(self): key = \"hello\" auth", "distributed under the License is distributed on an \"AS IS\" BASIS, # #", "# # See the License for the specific language governing permissions and #", "# # # Unless required by applicable law or agreed to in writing,", "Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed", "import mock, TestCase from unittest.mock import patch, MagicMock from ude.communication.grpc_auth import GrpcAuth class", "applicable law or agreed to in writing, software # # distributed under the", "agreed to in writing, software # # distributed under the License is distributed", "TestCase from unittest.mock import patch, MagicMock from ude.communication.grpc_auth import GrpcAuth class GrpcAuthTest(TestCase): def", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "def test_initialize(self): key = \"hello\" auth = GrpcAuth(key=key) assert key == auth._key def", "def setUp(self) -> None: pass def test_initialize(self): key = \"hello\" auth = GrpcAuth(key=key)", "software # # distributed under the License is distributed on an \"AS IS\"", "setUp(self) -> None: pass def test_initialize(self): key = \"hello\" auth = GrpcAuth(key=key) assert", "You may not use this file except in compliance with the License. #", "limitations under the License. # ################################################################################# from unittest import mock, TestCase from unittest.mock", "under the License is distributed on an \"AS IS\" BASIS, # # WITHOUT", "Apache License, Version 2.0 (the \"License\"). # # You may not use this", "with the License. # # You may obtain a copy of the License", "to in writing, software # # distributed under the License is distributed on", "# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "You may obtain a copy of the License at # # # #", "assert key == auth._key def test_call(self): key = \"hello\" auth = GrpcAuth(key=key) context_mock", "from unittest import mock, TestCase from unittest.mock import patch, MagicMock from ude.communication.grpc_auth import", "Inc. or its affiliates. All Rights Reserved. # # # # Licensed under", "at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by", "required by applicable law or agreed to in writing, software # # distributed", "and # # limitations under the License. # ################################################################################# from unittest import mock,", "None: pass def test_initialize(self): key = \"hello\" auth = GrpcAuth(key=key) assert key ==", "the Apache License, Version 2.0 (the \"License\"). # # You may not use", "ude.communication.grpc_auth import GrpcAuth class GrpcAuthTest(TestCase): def setUp(self) -> None: pass def test_initialize(self): key", "or its affiliates. 
All Rights Reserved. # # # # Licensed under the", "the License. # # You may obtain a copy of the License at", "################################################################################# from unittest import mock, TestCase from unittest.mock import patch, MagicMock from ude.communication.grpc_auth", "its affiliates. All Rights Reserved. # # # # Licensed under the Apache", "\"License\"). # # You may not use this file except in compliance with", "\"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "from ude.communication.grpc_auth import GrpcAuth class GrpcAuthTest(TestCase): def setUp(self) -> None: pass def test_initialize(self):", "GrpcAuthTest(TestCase): def setUp(self) -> None: pass def test_initialize(self): key = \"hello\" auth =", "# # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law", "in writing, software # # distributed under the License is distributed on an", "may not use this file except in compliance with the License. # #", "# # Licensed under the Apache License, Version 2.0 (the \"License\"). # #", "Version 2.0 (the \"License\"). # # You may not use this file except", "on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 #", "# limitations under the License. # ################################################################################# from unittest import mock, TestCase from", "= GrpcAuth(key=key) context_mock = MagicMock() callback_mock = MagicMock() auth(context=context_mock, callback=callback_mock) callback_mock.assert_called_with((('rpc-auth-header', key),), None)", "OF ANY KIND, either express or implied. 
# # See the License for", "unittest.mock import patch, MagicMock from ude.communication.grpc_auth import GrpcAuth class GrpcAuthTest(TestCase): def setUp(self) ->", "== auth._key def test_call(self): key = \"hello\" auth = GrpcAuth(key=key) context_mock = MagicMock()", "or agreed to in writing, software # # distributed under the License is", "CONDITIONS OF ANY KIND, either express or implied. # # See the License", "= GrpcAuth(key=key) assert key == auth._key def test_call(self): key = \"hello\" auth =", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # #", "from unittest.mock import patch, MagicMock from ude.communication.grpc_auth import GrpcAuth class GrpcAuthTest(TestCase): def setUp(self)", "MagicMock from ude.communication.grpc_auth import GrpcAuth class GrpcAuthTest(TestCase): def setUp(self) -> None: pass def", "ANY KIND, either express or implied. # # See the License for the", "Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # #", "auth = GrpcAuth(key=key) assert key == auth._key def test_call(self): key = \"hello\" auth", "# # Unless required by applicable law or agreed to in writing, software", "# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # #", "or implied. # # See the License for the specific language governing permissions", "# Unless required by applicable law or agreed to in writing, software #", "the License for the specific language governing permissions and # # limitations under", "this file except in compliance with the License. # # You may obtain", "License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required", "# http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed", "use this file except in compliance with the License. 
# # You may", "unittest import mock, TestCase from unittest.mock import patch, MagicMock from ude.communication.grpc_auth import GrpcAuth", "specific language governing permissions and # # limitations under the License. # #################################################################################", "under the Apache License, Version 2.0 (the \"License\"). # # You may not", "def test_call(self): key = \"hello\" auth = GrpcAuth(key=key) context_mock = MagicMock() callback_mock =", "# Licensed under the Apache License, Version 2.0 (the \"License\"). # # You", "of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # #", "# # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or", "import patch, MagicMock from ude.communication.grpc_auth import GrpcAuth class GrpcAuthTest(TestCase): def setUp(self) -> None:", "for the specific language governing permissions and # # limitations under the License.", "# # You may obtain a copy of the License at # #", "except in compliance with the License. # # You may obtain a copy", "key == auth._key def test_call(self): key = \"hello\" auth = GrpcAuth(key=key) context_mock =", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See", "test_initialize(self): key = \"hello\" auth = GrpcAuth(key=key) assert key == auth._key def test_call(self):", "the License is distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES", "the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless", "key = \"hello\" auth = GrpcAuth(key=key) context_mock = MagicMock() callback_mock = MagicMock() auth(context=context_mock,", "<reponame>aws-deepracer/ude ################################################################################# # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #", "# # # Licensed under the Apache License, Version 2.0 (the \"License\"). 
#", "License for the specific language governing permissions and # # limitations under the", "file except in compliance with the License. # # You may obtain a", "language governing permissions and # # limitations under the License. # ################################################################################# from", "may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0", "# # limitations under the License. # ################################################################################# from unittest import mock, TestCase", "OR CONDITIONS OF ANY KIND, either express or implied. # # See the", "BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# See the License for the specific language governing permissions and # #", "See the License for the specific language governing permissions and # # limitations", "################################################################################# # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # #", "an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# # distributed under the License is distributed on an \"AS IS\" BASIS,", "a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License, Version 2.0 (the \"License\"). # # You may not use this file", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "Reserved. 
# # # # Licensed under the Apache License, Version 2.0 (the", "auth._key def test_call(self): key = \"hello\" auth = GrpcAuth(key=key) context_mock = MagicMock() callback_mock", "auth = GrpcAuth(key=key) context_mock = MagicMock() callback_mock = MagicMock() auth(context=context_mock, callback=callback_mock) callback_mock.assert_called_with((('rpc-auth-header', key),),", "# You may not use this file except in compliance with the License.", "-> None: pass def test_initialize(self): key = \"hello\" auth = GrpcAuth(key=key) assert key", "Licensed under the Apache License, Version 2.0 (the \"License\"). # # You may", "affiliates. All Rights Reserved. # # # # Licensed under the Apache License,", "IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "under the License. # ################################################################################# from unittest import mock, TestCase from unittest.mock import", "# # You may not use this file except in compliance with the", "http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to", "pass def test_initialize(self): key = \"hello\" auth = GrpcAuth(key=key) assert key == auth._key", "2.0 (the \"License\"). # # You may not use this file except in", "writing, software # # distributed under the License is distributed on an \"AS", "either express or implied. # # See the License for the specific language", "# ################################################################################# from unittest import mock, TestCase from unittest.mock import patch, MagicMock from", "# # # # Unless required by applicable law or agreed to in", "the specific language governing permissions and # # limitations under the License. #", "= \"hello\" auth = GrpcAuth(key=key) context_mock = MagicMock() callback_mock = MagicMock() auth(context=context_mock, callback=callback_mock)", "governing permissions and # # limitations under the License. 
# ################################################################################# from unittest", "\"hello\" auth = GrpcAuth(key=key) context_mock = MagicMock() callback_mock = MagicMock() auth(context=context_mock, callback=callback_mock) callback_mock.assert_called_with((('rpc-auth-header',", "compliance with the License. # # You may obtain a copy of the", "# You may obtain a copy of the License at # # #", "by applicable law or agreed to in writing, software # # distributed under", "# # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable" ]
[ "fh: for i in range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32), [1], [0], [1]) fh.end_step() def", "#!/usr/bin/env python # # Distributed under the OSI-approved Apache License, Version 2.0. See", "[1], [0], [1]) fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps = [3, 5,", "= [3, 5, 7] param_string = \",\".join([str(i) for i in selected_steps]) adios =", "as np import adios2 TESTDATA_FILENAME = \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps =", "ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data = np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data,", "See # accompanying file Copyright.txt for details. # # TestBPSelectSteps_nompi.py: test step selection", "File Write # Created on: Jan 29, 2021 # Author: <NAME> <EMAIL> import", "test_select_steps_reading_fullAPI(self): selected_steps = [3, 5, 7] param_string = \",\".join([str(i) for i in selected_steps])", "fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data = np.zeros(len(selected_steps), dtype=np.int32)", "with adios2.open(TESTDATA_FILENAME, \"w\") as fh: for i in range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32), [1],", "i in selected_steps]) adios = adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh =", "ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data =", "adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data = np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data, 
adios2.Mode.Sync)", "fh.write(\"step\", np.array([i], dtype=np.int32), [1], [0], [1]) fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps", "License, Version 2.0. See # accompanying file Copyright.txt for details. # # TestBPSelectSteps_nompi.py:", "TESTDATA_FILENAME = \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps = 10 with adios2.open(TESTDATA_FILENAME, \"w\")", "ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data = np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i]", "= ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data = np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i] ==", "= np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i] for i in range(len(selected_steps))]))", "import adios2 TESTDATA_FILENAME = \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps = 10 with", "= adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var =", "ADIOS2 File Write # Created on: Jan 29, 2021 # Author: <NAME> <EMAIL>", "range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32), [1], [0], [1]) fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self):", "[3, 5, 7] param_string = \",\".join([str(i) for i in selected_steps]) adios = adios2.ADIOS()", "ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\") 
var.SetStepSelection([0,", "unittest import shutil import numpy as np import adios2 TESTDATA_FILENAME = \"steps_int32.bp\" class", "python # # Distributed under the OSI-approved Apache License, Version 2.0. See #", "OSI-approved Apache License, Version 2.0. See # accompanying file Copyright.txt for details. #", "adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i] for i in range(len(selected_steps))])) if __name__ == '__main__': unittest.main()", "class TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps = 10 with adios2.open(TESTDATA_FILENAME, \"w\") as fh: for", "under the OSI-approved Apache License, Version 2.0. See # accompanying file Copyright.txt for", "Apache License, Version 2.0. See # accompanying file Copyright.txt for details. # #", "file Copyright.txt for details. # # TestBPSelectSteps_nompi.py: test step selection by reading in", "\"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps = 10 with adios2.open(TESTDATA_FILENAME, \"w\") as fh:", "numpy as np import adios2 TESTDATA_FILENAME = \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps", "5, 7] param_string = \",\".join([str(i) for i in selected_steps]) adios = adios2.ADIOS() ioReadBP", "Python # in ADIOS2 File Write # Created on: Jan 29, 2021 #", "var = ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data = np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i]", "fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i] for i in range(len(selected_steps))])) if __name__ ==", "# # Distributed under the OSI-approved Apache License, Version 2.0. 
See # accompanying", "# # TestBPSelectSteps_nompi.py: test step selection by reading in Python # in ADIOS2", "import shutil import numpy as np import adios2 TESTDATA_FILENAME = \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase):", "# Author: <NAME> <EMAIL> import unittest import shutil import numpy as np import", "# Distributed under the OSI-approved Apache License, Version 2.0. See # accompanying file", "<EMAIL> import unittest import shutil import numpy as np import adios2 TESTDATA_FILENAME =", "\"w\") as fh: for i in range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32), [1], [0], [1])", "total_steps = 10 with adios2.open(TESTDATA_FILENAME, \"w\") as fh: for i in range(total_steps): fh.write(\"step\",", "adios = adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var", "= adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)])", "in Python # in ADIOS2 File Write # Created on: Jan 29, 2021", "29, 2021 # Author: <NAME> <EMAIL> import unittest import shutil import numpy as", "shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps = [3, 5, 7] param_string = \",\".join([str(i) for i", "2.0. See # accompanying file Copyright.txt for details. 
# # TestBPSelectSteps_nompi.py: test step", "= 10 with adios2.open(TESTDATA_FILENAME, \"w\") as fh: for i in range(total_steps): fh.write(\"step\", np.array([i],", "2021 # Author: <NAME> <EMAIL> import unittest import shutil import numpy as np", "def test_select_steps_reading_fullAPI(self): selected_steps = [3, 5, 7] param_string = \",\".join([str(i) for i in", "var.SetStepSelection([0, len(selected_steps)]) data = np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i] for", "def setUp(self): total_steps = 10 with adios2.open(TESTDATA_FILENAME, \"w\") as fh: for i in", "# in ADIOS2 File Write # Created on: Jan 29, 2021 # Author:", "fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps = [3, 5, 7] param_string =", "dtype=np.int32), [1], [0], [1]) fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps = [3,", "Distributed under the OSI-approved Apache License, Version 2.0. See # accompanying file Copyright.txt", "step selection by reading in Python # in ADIOS2 File Write # Created", "adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data", "the OSI-approved Apache License, Version 2.0. See # accompanying file Copyright.txt for details.", "for details. 
# # TestBPSelectSteps_nompi.py: test step selection by reading in Python #", "dtype=np.int32) fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i] for i in range(len(selected_steps))])) if __name__", "= \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps = 10 with adios2.open(TESTDATA_FILENAME, \"w\") as", "selection by reading in Python # in ADIOS2 File Write # Created on:", "np import adios2 TESTDATA_FILENAME = \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps = 10", "i in range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32), [1], [0], [1]) fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME)", "data = np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i] for i in", "in range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32), [1], [0], [1]) fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def", "param_string) fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data = np.zeros(len(selected_steps),", "[0], [1]) fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps = [3, 5, 7]", "selected_steps = [3, 5, 7] param_string = \",\".join([str(i) for i in selected_steps]) adios", "for i in range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32), [1], [0], [1]) fh.end_step() def tearDown(self):", "selected_steps]) adios = adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read)", "TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps = 10 with adios2.open(TESTDATA_FILENAME, \"w\") as fh: for i", 
"TestBPSelectSteps_nompi.py: test step selection by reading in Python # in ADIOS2 File Write", "7] param_string = \",\".join([str(i) for i in selected_steps]) adios = adios2.ADIOS() ioReadBP =", "# Created on: Jan 29, 2021 # Author: <NAME> <EMAIL> import unittest import", "Jan 29, 2021 # Author: <NAME> <EMAIL> import unittest import shutil import numpy", "for i in selected_steps]) adios = adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh", "len(selected_steps)]) data = np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i] for i", "np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var, data, adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i] for i in range(len(selected_steps))])) if", "test step selection by reading in Python # in ADIOS2 File Write #", "= ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\") var.SetStepSelection([0, len(selected_steps)]) data = np.zeros(len(selected_steps), dtype=np.int32) fh.Get(var,", "Copyright.txt for details. 
# # TestBPSelectSteps_nompi.py: test step selection by reading in Python", "10 with adios2.open(TESTDATA_FILENAME, \"w\") as fh: for i in range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32),", "param_string = \",\".join([str(i) for i in selected_steps]) adios = adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\")", "<filename>testing/adios2/bindings/python/TestBPSelectSteps_nompi.py #!/usr/bin/env python # # Distributed under the OSI-approved Apache License, Version 2.0.", "in selected_steps]) adios = adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh = ioReadBP.Open(TESTDATA_FILENAME,", "adios2.open(TESTDATA_FILENAME, \"w\") as fh: for i in range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32), [1], [0],", "= \",\".join([str(i) for i in selected_steps]) adios = adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME,", "data, adios2.Mode.Sync) self.assertTrue(all([data[i] == selected_steps[i] for i in range(len(selected_steps))])) if __name__ == '__main__':", "# TestBPSelectSteps_nompi.py: test step selection by reading in Python # in ADIOS2 File", "<NAME> <EMAIL> import unittest import shutil import numpy as np import adios2 TESTDATA_FILENAME", "adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string) fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read) var = ioReadBP.InquireVariable(\"step\")", "reading in Python # in ADIOS2 File Write # Created on: Jan 29,", "import numpy as np import adios2 TESTDATA_FILENAME = \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def setUp(self):", "Created on: Jan 29, 2021 # Author: <NAME> <EMAIL> import unittest import shutil", "[1]) fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps = [3, 5, 7] param_string", "Write # Created on: Jan 29, 2021 # 
Author: <NAME> <EMAIL> import unittest", "on: Jan 29, 2021 # Author: <NAME> <EMAIL> import unittest import shutil import", "tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps = [3, 5, 7] param_string = \",\".join([str(i) for", "import unittest import shutil import numpy as np import adios2 TESTDATA_FILENAME = \"steps_int32.bp\"", "\",\".join([str(i) for i in selected_steps]) adios = adios2.ADIOS() ioReadBP = adios.DeclareIO(\"hellopy\") ioReadBP.SetParameter(TESTDATA_FILENAME, param_string)", "in ADIOS2 File Write # Created on: Jan 29, 2021 # Author: <NAME>", "np.array([i], dtype=np.int32), [1], [0], [1]) fh.end_step() def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps =", "accompanying file Copyright.txt for details. # # TestBPSelectSteps_nompi.py: test step selection by reading", "Version 2.0. See # accompanying file Copyright.txt for details. # # TestBPSelectSteps_nompi.py: test", "setUp(self): total_steps = 10 with adios2.open(TESTDATA_FILENAME, \"w\") as fh: for i in range(total_steps):", "adios2 TESTDATA_FILENAME = \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def setUp(self): total_steps = 10 with adios2.open(TESTDATA_FILENAME,", "details. # # TestBPSelectSteps_nompi.py: test step selection by reading in Python # in", "# accompanying file Copyright.txt for details. 
# # TestBPSelectSteps_nompi.py: test step selection by", "def tearDown(self): shutil.rmtree(TESTDATA_FILENAME) def test_select_steps_reading_fullAPI(self): selected_steps = [3, 5, 7] param_string = \",\".join([str(i)", "shutil import numpy as np import adios2 TESTDATA_FILENAME = \"steps_int32.bp\" class TestAdiosSelectSteps(unittest.TestCase): def", "as fh: for i in range(total_steps): fh.write(\"step\", np.array([i], dtype=np.int32), [1], [0], [1]) fh.end_step()", "by reading in Python # in ADIOS2 File Write # Created on: Jan", "Author: <NAME> <EMAIL> import unittest import shutil import numpy as np import adios2" ]
[ "['All Homes 1 Year After Date Filed', 'All Homes Lockup Expiration Date'], 'All", "here I am defining it. weighted average is by time differential from beginning", "= predictions[1] df_x[\"Pred House Price GB\"] = predictions[2] df_x[\"Pred House Price ET Change\"]", "return df_ipo def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor): # Predicting Median Price of All", "ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove row where if", "Employees']) for i in range(0, len(ipo_cols)): dict[ipo_cols[i] + '_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df", "White', 'Percent of People whose Income in the Past 12 months has been", "Date'], 'All Homes 2 Years After Date Filed', 0.5) #view_feature_distributions(df_train) feature_cols = [", "import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection import KFold, cross_val_score, GridSearchCV from sklearn.pipeline", "predictions_gb] def create_predictions(predictions, df_x, label_divider): df_x[\"Pred House Price ET\"] = predictions[0] df_x[\"Pred House", "Homes Date Filed','Number of Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company Name']) min_max_normalization_list = ['Found_weighted',", "who are White', 'Percent of People who are Black or African American', 'Percent", "Work Estimate (minutes)', 'Percent of Households with Income Greater than $200,000', 'Median Household", "Filed', '2 Year Home Value ratio', 'All Homes Date Filed') #show_correlations_matrix(df_train, ['All Homes", "drop_columns) #ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed',", "years. 
Decisions: weighted average of encoded historical data --> either I can define", "in range(0, len(ipo_cols)): dict[ipo_cols[i] + '_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data) return", "from sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer from sklearn import ensemble, datasets, metrics from", "ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove row where if there is any 'NaN' value in", "Date Filed', '2 Year Home Value ratio', 'All Homes Date Filed') #show_correlations_matrix(df_train, ['All", "People 65 years and over', 'Percent of Males', 'Percent of Females', 'Percent of", "'Percent of Females', 'Percent of People who are Hispanic', 'Percent of People who", "Year Before Date Filed', 'All Homes 2 Years After Date Filed', 'Date Filed',", "plt.xlabel(col) plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train, df_test, label_attr, feature_list): # Split-out validation dataset X", "random_state=seed) cv_results = cross_val_score(model, df_train_x, df_train_y, cv=kfold, scoring=RMS) results.append(cv_results) names.append(name) msg = \"%s:", "df_train_y) # transform the validation dataset predictions_rf = model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy -->", "Age', 'Percent of People under 18 years of age', 'Percent of People 65", "encoded historical data --> either I can define it or learn it, but", "2 Years After Date Filed', 0.5) #view_feature_distributions(df_train) feature_cols = [ 'Distance to IPO_weighted',", "window used for encoding and prediction. Likely 2 years. 
Decisions: weighted average of", "print(results.summary()) plt.figure(figsize=(8, 5)) p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin = min(results.fittedvalues) xmax =", "= model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) return [model,", "= corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr = df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation between", "dataset predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100)", "#plt.show() def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y): # Test options and evaluation metric using", "who are Asian', 'Percent of People who are Black or African American', 'Percent", "model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) #", "df_x[\"Pred House Price ET Change\"] = predictions[0] / df_x[label_divider] - 1 df_x[\"Pred House", "model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) # transform the validation dataset predictions_gb = model_gb.predict(df_validation_x)", "= [ 'Mean Household Income Estimate (dollars)', 'Mean Travel Time to Work Estimate", "$24,999', 'Distance to IPO_weighted'] quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number of Employees_weighted'] ipo_final_with_date_filed_home =", "AdaBoostRegressor from sklearn.model_selection import KFold, cross_val_score, GridSearchCV from sklearn.pipeline import Pipeline import statsmodels.formula.api", "df_validation_y, seed): # prepare the model model = ExtraTreesRegressor(random_state=seed, 
n_estimators=100) model.fit(df_train_x, df_train_y) #", "of window to the end :return: ''' encoded_data = [] df[date_field] = pd.to_datetime(df[date_field],", "Than $24,999', 'Percent of Households with Income Greater than $200,000', 'Percent of Males',", "validation dataset X = df_train.loc[:, feature_list] y = df_train[label_attr] x_pred_test = df_test.loc[:, feature_list]", "# Test options and evaluation metric using Root Mean Square error method seed", "'Percent of People who are Asian', 'Unemployment Rate', 'Mean Travel Time to Work", "zipcode and the home prices at the Date Filed Time, the Lockup Date,", "df_train_y,df_validation_x, df_validation_y, 7) predictions = make_predictions_model(models, df_test_x) df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All Homes", "def make_predictions_model(models, df_test_x): # prepare the model predictions = models[0].predict(df_test_x) predictions_rf = models[1].predict(df_test_x)", "train_test_split(X, y, test_size=0.2, random_state=42) return X_train, X_validation, Y_train, Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr):", "annot=True) plt.title('Correlation between features'); plt.show() def view_feature_distributions(df): # histograms df.hist(bins=25, figsize=(25, 20), grid=False);", "['All Homes Date Filed','Number of Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company Name']) min_max_normalization_list =", "differential from beginning of window to the end :return: ''' encoded_data = []", "the model model = ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x, df_train_y) # transform the validation dataset", "0', 'CIK', 'Company Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes Date Filed','Number of Employees'],", "def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed): # prepare the model model = ExtraTreesRegressor(random_state=seed, n_estimators=100)", 
"model.score(df_validation_x, df_validation_y) * 100) return [model, model_rf, model_gb] def make_predictions_model(models, df_test_x): # prepare", "# transform the validation dataset predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy --> \",", "predictions[1] / df_x[label_divider] - 1 df_x[\"Pred House Price GB Change\"] = predictions[2] /df_x[label_divider]", "to IPO_weighted'] quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number of Employees_weighted'] ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list,", "2 Years After Date Filed', '2 Year Home Value ratio', 'All Homes Date", "as np import sklearn import matplotlib.pyplot as plt import seaborn as sns from", "(dollars)', 'Mean Travel Time to Work Estimate (minutes)', 'Median Age', 'Median Household Income", "'Percent of Males', 'Percent of People 65 years and over', 'Percent of People", "make_predictions_model(models, df_test_x): # prepare the model predictions = models[0].predict(df_test_x) predictions_rf = models[1].predict(df_test_x) predictions_gb", "range(0, len(ipo_cols)): dict[ipo_cols[i] + '_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data) return ipo_final_ecoded_df", "encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data) return ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number])", "years and over', 'Percent of Males', 'Percent of Females', 'Percent of People who", "after the date is filed. 
''' ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0,", "of Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes 2 Years After Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes", "'Per Capita Income Estimate (dollars)', 'Percent of Population with no Health Insurance Coverage',", "plt.plot(x, y, 'o') # Create regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col)", "People under 18 years of age', 'Percent of People who are Asian', 'Percent", "'Percent of People who are White', 'Percent of People whose Income in the", "the Lockup Date, 1 Year after the Date is Filed and 2 years", "stats from sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer from sklearn", "'Company Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes Date Filed','Number of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date", "sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer from sklearn import ensemble, datasets, metrics from sklearn.utils", "3 plots here hence 1, 3 plt.subplot(10, 6, i + 1) x =", "= pd.to_numeric(filtered_rows['Number of Employees']) for i in range(0, len(ipo_cols)): dict[ipo_cols[i] + '_weighted'] =", "import train_test_split from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection import KFold,", "'Mean Travel Time to Work Estimate (minutes)', 'Percent of Households with Income Greater", "Price ET Change\"] = predictions[0] / df_x[label_divider] - 1 df_x[\"Pred House Price RF", "IPO_weighted', 'Found_weighted', 'Mean Household Income Estimate (dollars)', 'Mean Travel Time to Work Estimate", "= pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0, 
subset=drop_nan_columns) # remove row where if there", "Filed','Number of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df =", "drop_columns = ['Unnamed: 0', 'CIK', 'Company Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes Date", "between features'); plt.show() def view_feature_distributions(df): # histograms df.hist(bins=25, figsize=(25, 20), grid=False); def view_residual_feature_plots(df,", "model in pipelines: kfold = KFold(n_splits=num_folds, random_state=seed) cv_results = cross_val_score(model, df_train_x, df_train_y, cv=kfold,", "House Price RF Change\"] = predictions[1] / df_x[label_divider] - 1 df_x[\"Pred House Price", "the demographics of each of those zipcodes, economic data of the zipcode and", "models[0].predict(df_test_x) predictions_rf = models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x) return [predictions, predictions_rf, predictions_gb] def create_predictions(predictions,", "pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove row where if there is", "data of the zipcode and the home prices at the Date Filed Time,", "Lockup Expiration Date'], 'All Homes 2 Years After Date Filed', 0.5) #view_feature_distributions(df_train) feature_cols", "that will be used to create time windows :param location_field: field that denotes", "--> \", model.score(df_validation_x, df_validation_y) * 100) return [model, model_rf, model_gb] def make_predictions_model(models, df_test_x):", "pd.DataFrame(encoded_data) return ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns)", "from sklearn import ensemble, datasets, metrics from sklearn.utils import 
shuffle from sklearn.metrics import", "return ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns) train_corr.shape", "--> \", model.score(df_validation_x, df_validation_y) * 100) # prepare the model model_rf = RandomForestRegressor(random_state=seed,", "dataframe with ipo data :param date_field: field that will be used to create", "ipo_final_df def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max = MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile", "plt.figure(figsize=(8, 5)) p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin = min(results.fittedvalues) xmax = max(results.fittedvalues)", "filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data) return ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold): train_corr =", "House Price GB Change\"] = predictions[2] /df_x[label_divider] - 1 return df_x def main_build_predictions():", "plots here hence 1, 3 plt.subplot(10, 6, i + 1) x = df[col]", "'All Homes 2 Years After Date Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] # dataset that", "k_folds_algorithms,df_train_x, df_train_y) models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7) predictions = make_predictions_model(models, df_test_x) df_test_x_with_pred", "df_x[label_divider] - 1 df_x[\"Pred House Price GB Change\"] = predictions[2] /df_x[label_divider] - 1", "Homes Date Filed','All Homes 1 Year Before Date Filed', 'Zipcode for Distance', 'Number", "Greater than $200,000', 'Percent of Males', 'Percent of People 65 years and over',", "pd.to_numeric(filtered_rows['Number of Employees']) for i in range(0, len(ipo_cols)): dict[ipo_cols[i] + 
'_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]])", "'Percent of People 65 years and over', 'Percent of People under 18 years", "= normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes", "Price RF Change\"] = predictions[1] / df_x[label_divider] - 1 df_x[\"Pred House Price GB", "Year Before Date Filed', 'Zipcode for Distance', 'Number of Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes", "= ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove row where if there is any 'NaN' value", "label_attr, ratio_label, ratio_divisor): # Predicting Median Price of All Homes in a Zipcode,", "a 10 mile radius from the IPO Zipcode, the demographics of each of", "After Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After Date Filed') df_train_x, df_validation_x,", "Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Females', 'Percent of", "data. 
Within radius of 10 miles of IPO :param time_window: time window used", "pd.to_datetime(df[date_field], format='%Y-%m-%d') for index, row in df.iterrows(): dict = row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field]", "65 years and over', 'Percent of People under 18 years of age', 'Percent", "xmin = min(results.fittedvalues) xmax = max(results.fittedvalues) plt.hlines(y=0, xmin=xmin * 0.9, xmax=xmax * 1.1,", "Estimate (minutes)', 'Percent of Households with Income Greater than $200,000', 'Median Household Income", "'Percent of People who are Black or African American', 'Percent of People who", "'Unemployment Rate', 'Mean Travel Time to Work Estimate (minutes)', 'Percent of Households with", "df_train_y,df_validation_x, df_validation_y, seed): # prepare the model model = ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x, df_train_y)", "plot=plt) plt.show() def run_ordinary_least_squares(df_x, df_y): model = sm.OLS(df_y, df_x) results = model.fit() print(results.summary())", "- 1 df_x[\"Pred House Price RF Change\"] = predictions[1] / df_x[label_divider] - 1", "feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After Date Filed') df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x", "1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows = filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number of Employees'] = pd.to_numeric(filtered_rows['Number of", "People who are Asian', 'Percent of People who are Black or African American',", "is Filed and 2 years after the date is filed. 
''' ipo_final_df =", "9)) sns.heatmap(corr, annot=True) plt.show() top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr =", "show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns) train_corr.shape # Correlation plot", "Date Filed', 0.5) #view_feature_distributions(df_train) feature_cols = [ 'Distance to IPO_weighted', 'Found_weighted', 'Mean Household", "correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr = df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation between features'); plt.show() def", "Poverty Level', 'Percent of Households With Income Less Than $24,999', 'Distance to IPO_weighted']", "been Below Poverty Level', 'Percent of Households With Income Less Than $24,999', 'Distance", "{:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') # Probablity plot fig = plt.figure() stats.probplot(df[attr], plot=plt)", "QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def create_test_train_set(df_ipo, label_attr, ratio_label,", "#plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After Date Filed') df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x =", "'Zipcode for Distance', 'Number of Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes 2 Years After Date", "Decisions: weighted average of encoded historical data --> either I can define it", "date is filed. 
''' ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns) #", "Home Value ratio', 'All Homes Date Filed') #show_correlations_matrix(df_train, ['All Homes 1 Year After", "Homes in a Zipcode, and strucuturing data to do so. df_ipo[ratio_label] = df_ipo[label_attr]", "min_max_normalization_list = ['Found_weighted', 'Median Age', 'Percent of People under 18 years of age',", "df_train_y): # Test options and evaluation metric using Root Mean Square error method", "Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data): feature_cols = [ 'Mean Household Income Estimate (dollars)',", "Median Price of All Homes in a Zipcode, and strucuturing data to do", "options and evaluation metric using Root Mean Square error method seed = 7", "predictions[2] df_x[\"Pred House Price ET Change\"] = predictions[0] / df_x[label_divider] - 1 df_x[\"Pred", "People under 18 years of age', 'Percent of People 65 years and over',", "validation dataset predictions_rf = model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) *", "'All Homes 2 Years After Date Filed', 'Date Filed', 'Zipcode for Distance'] ipo_cols", "for i in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results = [] names", "def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y): # Test options and evaluation metric using Root", "and over', 'Percent of People under 18 years of age', 'Percent of People", "# transform the validation dataset predictions = model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy", "Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of 
Population with no Health", "('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) models =", "y=results.resid, edgecolor='k') xmin = min(results.fittedvalues) xmax = max(results.fittedvalues) plt.hlines(y=0, xmin=xmin * 0.9, xmax=xmax", "prep_train_validation_test_data(df_train, df_test, label_attr, feature_list): # Split-out validation dataset X = df_train.loc[:, feature_list] y", "MinMaxScaler, RobustScaler, QuantileTransformer from sklearn import ensemble, datasets, metrics from sklearn.utils import shuffle", "2 Years After Date Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] # dataset that I will", "Homes 2 Years After Date Filed', '2 Year Home Value ratio', 'All Homes", "Income Estimate (dollars)', 'Percent of Females', 'Percent of Households With Income Less Than", "names = [] for name, model in pipelines: kfold = KFold(n_splits=num_folds, random_state=seed) cv_results", "df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor):", "'CIK', 'Company Name']) min_max_normalization_list = ['Found_weighted', 'Median Age', 'Percent of People under 18", "* 100) return [model, model_rf, model_gb] def make_predictions_model(models, df_test_x): # prepare the model", "will be used to create time windows :param location_field: field that denotes the", "= model.fit() print(results.summary()) plt.figure(figsize=(8, 5)) p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin = min(results.fittedvalues)", "load_processed_ipo_data(data, ['All Homes Date Filed','Number of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed'] = 
pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'],", "x_pred_test = df_test.loc[:, feature_list] X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.2, random_state=42)", "'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field])", "Travel Time to Work Estimate (minutes)', 'Median Age', 'Median Household Income Estimate (dollars)',", "Time to Work Estimate (minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Per", "(dollars)', 'Mean Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of", "= 'neg_mean_squared_error' pipelines = [] for i in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()),", "= models[0].predict(df_test_x) predictions_rf = models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x) return [predictions, predictions_rf, predictions_gb] def", "use to train the model because it does have 'All Homes 2 Years", "plt.show() def view_feature_distributions(df): # histograms df.hist(bins=25, figsize=(25, 20), grid=False); def view_residual_feature_plots(df, label_attr, feature_list):", "#show_correlations_matrix(df_train, ['All Homes 1 Year After Date Filed', 'All Homes Lockup Expiration Date'],", "Within radius of 10 miles of IPO :param time_window: time window used for", "sklearn.utils import shuffle from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.ensemble", "filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows = filtered_rows.replace(['--'], [1], regex=True)", "df_test, 'All Homes 2 Years After Date Filed', feature_cols) 
#run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR',", "every IPO in Silicon Valley, and each zip code in a 10 mile", "in df.iterrows(): dict = row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field] > row[date_field]) & (df[date_field] <", "= df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols, ipo_cols): '''", "model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) params = {'n_estimators':", "the Past 12 months has been Below Poverty Level', 'Percent of Population with", "can define it or learn it, but here I am defining it. weighted", "= (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows = filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number", "years and over', 'Percent of People under 18 years of age', 'Percent of", "the end :return: ''' encoded_data = [] df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d') for index,", "Work Estimate (minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Offer Amount_weighted', 'Per", "import shuffle from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.ensemble import", "= predictions[2] /df_x[label_divider] - 1 return df_x def main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All", "import sklearn import matplotlib.pyplot as plt import seaborn as sns from scipy import", "is filed. 
''' ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove", "('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7) predictions =", "--> either I can define it or learn it, but here I am", "4, 'min_samples_split': 2, 'learning_rate': 0.01, 'loss': 'ls'} model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) #", "here hence 1, 3 plt.subplot(10, 6, i + 1) x = df[col] y", "linestyle='--', lw=3) plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs. residuals plot\", fontsize=18) plt.grid(True)", "but here I am defining it. weighted average is by time differential from", "Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Females', 'Percent", "2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False) if __name__ == \"__main__\": print(\"we are learning\") create_encoding_historical_zipcode_data('../data/processed/df_ipo_all.csv')", "Distance', 2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False) if __name__ == \"__main__\": print(\"we are learning\")", "MinMaxScaler()), algs_to_test[i][1]]))) results = [] names = [] for name, model in pipelines:", "the Date is Filed and 2 years after the date is filed. 
'''", "3 plt.subplot(10, 6, i + 1) x = df[col] y = df[label_attr] plt.plot(x,", "prices at the Date Filed Time, the Lockup Date, 1 Year after the", "With Income Less Than $24,999', 'Distance to IPO_weighted'] quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number", "def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns) train_corr.shape # Correlation", "df_test.loc[:, feature_list] X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.2, random_state=42) return X_train,", "== row[location_field]] filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y'))", "People who are Hispanic', 'Percent of People who are White', 'Percent of People", "(df[date_field] < row[date_field] + np.timedelta64(time_window, 'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index =", "feature_list] y = df_train[label_attr] x_pred_test = df_test.loc[:, feature_list] X_train, X_validation, Y_train, Y_validation =", "create_encoding_historical_zipcode_data(data): feature_cols = [ 'Mean Household Income Estimate (dollars)', 'Mean Travel Time to", "of Employees'] = pd.to_numeric(filtered_rows['Number of Employees']) for i in range(0, len(ipo_cols)): dict[ipo_cols[i] +", "regex=True) filtered_rows['Number of Employees'] = pd.to_numeric(filtered_rows['Number of Employees']) for i in range(0, len(ipo_cols)):", "Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date", "to create time windows :param 
location_field: field that denotes the zipcode demographic and", "format='%Y-%m-%d') ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for Distance', 2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\",", "model.score(df_validation_x, df_validation_y) * 100) # prepare the model model_rf = RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x,", "df_test_x): # prepare the model predictions = models[0].predict(df_test_x) predictions_rf = models[1].predict(df_test_x) predictions_gb =", "return ipo_final_df def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max = MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list])", "Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], fit=stats.norm) # Get the fitted", "Filed', 'Zipcode for Distance'] ipo_cols = ['Offer Amount', 'Number of Employees', 'Found', 'Distance", "with Income Greater than $200,000', 'Median Household Income Estimate (dollars)', 'Mean Household Income", "the model model_rf = RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y) # transform the validation dataset", "names.append(name) msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std()) print(msg) def build_models(df_train_x,", "years of age', 'Percent of People who are Asian', 'Percent of People who", "I am defining it. weighted average is by time differential from beginning of", "df_train_y) # transform the validation dataset predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy -->", "the zipcode demographic and economic data. Within radius of 10 miles of IPO", "to do so. 
df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor] # dataset that does not", "2, 'learning_rate': 0.01, 'loss': 'ls'} model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) # transform the", "with ipo data :param date_field: field that will be used to create time", "Date Filed', feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF',", "Distance', 'Number of Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes 2 Years After Date Filed', feature_cols)", "the fitted parameters used by the function (mu, sigma) = stats.norm.fit(df[attr]) # plot", "feature_list): # Split-out validation dataset X = df_train.loc[:, feature_list] y = df_train[label_attr] x_pred_test", "df_validation_y) * 100) params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.01,", "location_field: field that denotes the zipcode demographic and economic data. Within radius of", "of People who are Black or African American', 'Percent of People who are", "Price GB\"] = predictions[2] df_x[\"Pred House Price ET Change\"] = predictions[0] / df_x[label_divider]", "# transform the validation dataset predictions_rf = model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy --> \",", "demographics of each of those zipcodes, economic data of the zipcode and the", "= scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor): # Predicting Median Price", "Homes 1 Year After Date Filed', 'All Homes Lockup Expiration Date'], 'All Homes", "House Price RF\"] = predictions[1] df_x[\"Pred House Price GB\"] = predictions[2] df_x[\"Pred House", "of All Homes in a Zipcode, and strucuturing data to do so. 
df_ipo[ratio_label]", "scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor): # Predicting", "RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y) # transform the validation dataset predictions_rf = model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y,", "Root Mean Square error method seed = 7 RMS = 'neg_mean_squared_error' pipelines =", "df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols, ipo_cols):", "'All Homes Date Filed','All Homes 1 Year Before Date Filed', 'All Homes 2", "'Mean Travel Time to Work Estimate (minutes)', 'Median Age', 'Median Household Income Estimate", "of People 65 years and over', 'Percent of People under 18 years of", "ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove row where if there is any 'NaN'", "ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for Distance', 2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False)", "= make_predictions_model(models, df_test_x) df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False)", "Homes 2 Years After Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After Date", ":return: ''' encoded_data = [] df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d') for index, row in", "Homes 2 Years After Date Filed', 0.5) #view_feature_distributions(df_train) feature_cols = [ 'Distance to", 
"drop_columns): ''' Import Final IPO csv that was created in wrangling.ipynb. Here we", "9)) sns.distplot(df[attr], fit=stats.norm) # Get the fitted parameters used by the function (mu,", "dict[ipo_cols[i] + '_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data) return ipo_final_ecoded_df def show_correlations_matrix(df,", "of People who are Asian', 'Unemployment Rate', 'Mean Travel Time to Work Estimate", "data to do so. df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor] # dataset that does", "People whose Income in the Past 12 months has been Below Poverty Level',", "of People who are Hispanic', 'Percent of People who are White', 'Percent of", "validation dataset predictions = model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy --> \", model.score(df_validation_x,", "6, i + 1) x = df[col] y = df[label_attr] plt.plot(x, y, 'o')", "plot corr = train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True) plt.show() top_feature = corr.index[abs(corr[label_attr] >", "Final IPO csv that was created in wrangling.ipynb. 
Here we have every IPO", "df_ipo def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor): # Predicting Median Price of All Homes", "QuantileTransformer from sklearn import ensemble, datasets, metrics from sklearn.utils import shuffle from sklearn.metrics", "#k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y)", "regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train, df_test,", "random_state=42) return X_train, X_validation, Y_train, Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr],", "are Hispanic', 'Percent of People who are White', 'Percent of People whose Income", "Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After Date Filed') df_train_x, df_validation_x, df_train_y,", "Time to Work Estimate (minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Offer", "= train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True) plt.show() top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12,", "Hispanic', 'Percent of People who are White', 'Percent of People who are Black", "no Health Insurance Coverage', 'Percent of People whose Income in the Past 12", "Homes 2 Years After Date Filed', feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB',", "= models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x) return [predictions, predictions_rf, predictions_gb] def 
create_predictions(predictions, df_x, label_divider):", "= df_test.loc[:, feature_list] X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.2, random_state=42) return", "df[label_attr] plt.plot(x, y, 'o') # Create regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col)", "Household Income Estimate (dollars)', 'Mean Travel Time to Work Estimate (minutes)', 'Median Age',", "RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7) predictions = make_predictions_model(models,", "in Silicon Valley, and each zip code in a 10 mile radius from", "to train the model because it does have 'All Homes 2 Years After", "Year Home Value ratio', 'All Homes Date Filed') #show_correlations_matrix(df_train, ['All Homes 1 Year", "IPO in Silicon Valley, and each zip code in a 10 mile radius", "% (name, cv_results.mean(), cv_results.std()) print(msg) def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed): # prepare the", "to the end :return: ''' encoded_data = [] df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d') for", "Income Greater than $200,000', 'Median Household Income Estimate (dollars)', 'Mean Household Income Estimate", "Level', 'Percent of Households With Income Less Than $24,999', 'Distance to IPO_weighted'] quantile_scaler_normalization_list", "label_attr, feature_list): # Split-out validation dataset X = df_train.loc[:, feature_list] y = df_train[label_attr]", "Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results = [] names = [] for name, model in", "1 df_x[\"Pred House Price RF Change\"] = predictions[1] / df_x[label_divider] - 1 df_x[\"Pred", "X_validation, Y_train, Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], 
fit=stats.norm) # Get", "'learning_rate': 0.01, 'loss': 'ls'} model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) # transform the validation", "Asian', 'Percent of People who are Black or African American', 'Percent of People", "\", model.score(df_validation_x, df_validation_y) * 100) return [model, model_rf, model_gb] def make_predictions_model(models, df_test_x): #", "and each zip code in a 10 mile radius from the IPO Zipcode,", "Probablity plot fig = plt.figure() stats.probplot(df[attr], plot=plt) plt.show() def run_ordinary_least_squares(df_x, df_y): model =", "there is any 'NaN' value in column 'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns) return ipo_final_df", "2 Years After Date Filed') df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test,", "Filed', 'Zipcode for Distance', 2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False) if __name__ == \"__main__\":", "KFold, cross_val_score, GridSearchCV from sklearn.pipeline import Pipeline import statsmodels.formula.api as sm from datetime", "line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train, df_test, label_attr,", "models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x) return [predictions, predictions_rf, predictions_gb] def create_predictions(predictions, df_x, label_divider): df_x[\"Pred", "of People who are White', 'Percent of People whose Income in the Past", "cv_results.mean(), cv_results.std()) print(msg) def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed): # prepare the model model", "view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25, 60)) # i: index for i, col in enumerate(feature_list):", "sm from datetime import 
datetime def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): ''' Import Final IPO", "msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std()) print(msg) def build_models(df_train_x, df_train_y,df_validation_x,", "= model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) params =", "over', 'Percent of Males', 'Percent of Females', 'Percent of People who are Hispanic',", "Filed','All Homes 1 Year Before Date Filed', 'All Homes 2 Years After Date", "= predictions[2] df_x[\"Pred House Price ET Change\"] = predictions[0] / df_x[label_divider] - 1", "X_train, X_validation, Y_train, Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], fit=stats.norm) #", "20), grid=False); def view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25, 60)) # i: index for i,", "fitted parameters used by the function (mu, sigma) = stats.norm.fit(df[attr]) # plot with", "(name, cv_results.mean(), cv_results.std()) print(msg) def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed): # prepare the model", "KFold(n_splits=num_folds, random_state=seed) cv_results = cross_val_score(model, df_train_x, df_train_y, cv=kfold, scoring=RMS) results.append(cv_results) names.append(name) msg =", "by the function (mu, sigma) = stats.norm.fit(df[attr]) # plot with the distribution plt.legend(['Normal", "create_predictions(predictions, df_test_x, 'All Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data): feature_cols = [", "- 1 df_x[\"Pred House Price GB Change\"] = predictions[2] /df_x[label_divider] - 1 return", "Test options and evaluation metric using Root Mean Square error method seed =", "years of age', 'Percent of 
People 65 years and over', 'Percent of Males',", "#view_feature_distributions(df_train) feature_cols = [ 'Distance to IPO_weighted', 'Found_weighted', 'Mean Household Income Estimate (dollars)',", "dataset predictions_rf = model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100)", "months has been Below Poverty Level', 'Percent of Population with no Health Insurance", "Population with no Health Insurance Coverage', 'Percent of People whose Income in the", "plt.show() def prep_train_validation_test_data(df_train, df_test, label_attr, feature_list): # Split-out validation dataset X = df_train.loc[:,", "('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) models = build_models(df_train_x, df_train_y,df_validation_x,", "Date is Filed and 2 years after the date is filed. 
''' ipo_final_df", "with no Health Insurance Coverage', 'Percent of People whose Income in the Past", "'2 Year Home Value ratio', 'All Homes Date Filed') #show_correlations_matrix(df_train, ['All Homes 1", "np import sklearn import matplotlib.pyplot as plt import seaborn as sns from scipy", "=[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) models", "0)) df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years After Date Filed', '2", "Age', 'Median Household Income Estimate (dollars)', 'Offer Amount_weighted', 'Per Capita Income Estimate (dollars)',", "Price of All Homes in a Zipcode, and strucuturing data to do so.", "After Date Filed') df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test, 'All Homes", "['Offer Amount', 'Number of Employees', 'Found', 'Distance to IPO'] drop_columns = ['Unnamed: 0',", "['Offer Amount_weighted', 'Number of Employees_weighted'] ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0))", "= max(results.fittedvalues) plt.hlines(y=0, xmin=xmin * 0.9, xmax=xmax * 1.1, color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted", "Date, 1 Year after the Date is Filed and 2 years after the", "of Females', 'Percent of People who are Hispanic', 'Percent of People who are", "= filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows = filtered_rows.replace(['--'],", "print(msg) def build_models(df_train_x, df_train_y,df_validation_x, 
df_validation_y, seed): # prepare the model model = ExtraTreesRegressor(random_state=seed,", "Date Filed', 'Date Filed', 'Zipcode for Distance'] ipo_cols = ['Offer Amount', 'Number of", "Homes 2 Years After Date Filed') df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train,", "After Date Filed', 0.5) #view_feature_distributions(df_train) feature_cols = [ 'Distance to IPO_weighted', 'Found_weighted', 'Mean", "Estimate (dollars)', 'Percent of Females', 'Percent of Households With Income Less Than $24,999',", "df_train_x, df_train_y, cv=kfold, scoring=RMS) results.append(cv_results) names.append(name) msg = \"%s: %f (%f)\" % (name,", "of age', 'Percent of People who are Asian', 'Percent of People who are", "= 7 RMS = 'neg_mean_squared_error' pipelines = [] for i in range(0, len(algs_to_test)):", "prep_train_validation_test_data(df_train, df_test, 'All Homes 2 Years After Date Filed', feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms", "Amount', 'Number of Employees', 'Found', 'Distance to IPO'] drop_columns = ['Unnamed: 0', 'CIK',", "# prepare the model model_rf = RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y) # transform the", "under 18 years of age', 'Percent of People 65 years and over', 'Percent", "def create_encoding_historical_zipcode_data(data): feature_cols = [ 'Mean Household Income Estimate (dollars)', 'Mean Travel Time", "'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns) return ipo_final_df def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max = MinMaxScaler()", "df_train.loc[:, feature_list] y = df_train[label_attr] x_pred_test = df_test.loc[:, feature_list] X_train, X_validation, Y_train, Y_validation", "of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d') 
ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home,", "(minutes)', 'Percent of Households with Income Greater than $200,000', 'Median Household Income Estimate", "(minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)',", "train_test_split from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection import KFold, cross_val_score,", "sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection import KFold, cross_val_score, GridSearchCV from", "1 Year after the Date is Filed and 2 years after the date", "Here we have every IPO in Silicon Valley, and each zip code in", "df_ipo[df_ipo[label_attr].isna()] # dataset that I will use to train the model because it", "plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], fit=stats.norm) # Get the fitted parameters used by the function", "from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from", "'Distance to IPO_weighted', 'Found_weighted', 'Mean Household Income Estimate (dollars)', 'Mean Travel Time to", "2 Years After Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After Date Filed')", "7 RMS = 'neg_mean_squared_error' pipelines = [] for i in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0],", "y = df[label_attr] plt.plot(x, y, 'o') # Create regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y,", "plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin = min(results.fittedvalues) xmax = max(results.fittedvalues) plt.hlines(y=0, xmin=xmin * 0.9,", "return [model, model_rf, model_gb] def make_predictions_model(models, df_test_x): # prepare the model predictions =", "60)) # i: index for i, col 
in enumerate(feature_list): # 3 plots here", "7) predictions = make_predictions_model(models, df_test_x) df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All Homes Date Filed')", "People who are Black or African American', 'Percent of People who are Asian',", "# dataset that does not have 'All Homes 2 Years After Date Filed'", "IPO Zipcode, the demographics of each of those zipcodes, economic data of the", "'Unemployment Rate', 'All Homes Date Filed','All Homes 1 Year Before Date Filed', 'All", "Capita Income Estimate (dollars)', 'Percent of Females', 'Percent of Households With Income Less", "[] for i in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results = []", "learn it, but here I am defining it. weighted average is by time", "create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years After Date Filed', '2 Year Home Value ratio',", "# histograms df.hist(bins=25, figsize=(25, 20), grid=False); def view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25, 60)) #", "the Past 12 months has been Below Poverty Level', 'Percent of Households With", "of Households with Income Greater than $200,000', 'Median Household Income Estimate (dollars)', 'Mean", ")'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') # Probablity plot fig = plt.figure() stats.probplot(df[attr], plot=plt) plt.show()", "2 Years After Date Filed', feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB',", "ensemble, datasets, metrics from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error from sklearn.model_selection", "= filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data) return ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns, 
label_attr,correlation_threshold): train_corr", "--> \", model.score(df_validation_x, df_validation_y) * 100) params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split':", "from sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer from sklearn import", "Filed', 'All Homes 2 Years After Date Filed', 'Date Filed', 'Zipcode for Distance']", "RobustScaler, QuantileTransformer from sklearn import ensemble, datasets, metrics from sklearn.utils import shuffle from", "are White', 'Percent of People who are Black or African American', 'Percent of", "format='%Y-%m-%d') for index, row in df.iterrows(): dict = row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field] >", "Amount_weighted', 'Number of Employees_weighted'] ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train,", "Distance'] ipo_cols = ['Offer Amount', 'Number of Employees', 'Found', 'Distance to IPO'] drop_columns", "no Health Insurance Coverage', 'Unemployment Rate', 'All Homes Date Filed','All Homes 1 Year", "subset=drop_nan_columns) # remove row where if there is any 'NaN' value in column", "df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d') for index, row in df.iterrows(): dict = row.filter(feature_cols).to_dict() filtered_rows", "or learn it, but here I am defining it. 
weighted average is by", "who are Asian', 'Unemployment Rate', 'Mean Travel Time to Work Estimate (minutes)', 'Percent", "in enumerate(feature_list): # 3 plots here hence 1, 3 plt.subplot(10, 6, i +", "max(results.fittedvalues) plt.hlines(y=0, xmin=xmin * 0.9, xmax=xmax * 1.1, color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted values\",", "['All Homes Date Filed','Number of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce',", "historical data --> either I can define it or learn it, but here", "and over', 'Percent of Males', 'Percent of Females', 'Percent of People who are", "of each of those zipcodes, economic data of the zipcode and the home", "df_x[label_divider] - 1 df_x[\"Pred House Price RF Change\"] = predictions[1] / df_x[label_divider] -", "I can define it or learn it, but here I am defining it.", "'All Homes 2 Years After Date Filed', '2 Year Home Value ratio', 'All", "'Median Age', 'Median Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent", "fontsize=18) plt.grid(True) #plt.show() def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y): # Test options and evaluation", "end :return: ''' encoded_data = [] df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d') for index, row", "#run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7) predictions = make_predictions_model(models, df_test_x)", "load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): ''' Import Final IPO csv that was created in wrangling.ipynb.", "prepare the model model = ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x, df_train_y) # transform the validation", "ipo_cols = ['Offer Amount', 'Number of Employees', 'Found', 'Distance to IPO'] drop_columns =", "hence 1, 3 plt.subplot(10, 6, i + 1) x = df[col] y =", 
"location_field, time_window, feature_cols, ipo_cols): ''' :param df: dataframe with ipo data :param date_field:", "for i in range(0, len(ipo_cols)): dict[ipo_cols[i] + '_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df =", "RMS = 'neg_mean_squared_error' pipelines = [] for i in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler',", "import MinMaxScaler, RobustScaler, QuantileTransformer from sklearn import ensemble, datasets, metrics from sklearn.utils import", "that denotes the zipcode demographic and economic data. Within radius of 10 miles", "ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number of Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company", "print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) params = {'n_estimators': 500, 'max_depth': 4,", "of Employees']) for i in range(0, len(ipo_cols)): dict[ipo_cols[i] + '_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict)", "(filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows = filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number of", "'_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data) return ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold):", "Change\"] = predictions[2] /df_x[label_divider] - 1 return df_x def main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv',", "am defining it. 
weighted average is by time differential from beginning of window", "Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Females', 'Percent of Households", "np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train, df_test, label_attr, feature_list): #", "Estimate (dollars)', 'Percent of Population with no Health Insurance Coverage', 'Percent of People", "df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years After Date Filed', '2 Year", "xmin=xmin * 0.9, xmax=xmax * 1.1, color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\",", "beginning of window to the end :return: ''' encoded_data = [] df[date_field] =", "Date Filed','All Homes 1 Year Before Date Filed', 'Zipcode for Distance', 'Number of", "Population with no Health Insurance Coverage', 'Unemployment Rate', 'All Homes Date Filed','All Homes", "mean_squared_error from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from", "of Population with no Health Insurance Coverage', 'Percent of People whose Income in", "Greater than $200,000', 'Median Household Income Estimate (dollars)', 'Mean Household Income Estimate (dollars)',", "sm.OLS(df_y, df_x) results = model.fit() print(results.summary()) plt.figure(figsize=(8, 5)) p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k')", "min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years", "index, row in df.iterrows(): dict = row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field] > row[date_field]) &", "prepare the model predictions = models[0].predict(df_test_x) 
predictions_rf = models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x) return", "histograms df.hist(bins=25, figsize=(25, 20), grid=False); def view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25, 60)) # i:", "= QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def create_test_train_set(df_ipo, label_attr,", "Homes 2 Years After Date Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years, df_test_set_2_years def", "Than $24,999', 'Distance to IPO_weighted'] quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number of Employees_weighted'] ipo_final_with_date_filed_home", "18 years of age', 'Percent of People who are Asian', 'Percent of People", "will use to train the model because it does have 'All Homes 2", "cross_val_score(model, df_train_x, df_train_y, cv=kfold, scoring=RMS) results.append(cv_results) names.append(name) msg = \"%s: %f (%f)\" %", "Silicon Valley, and each zip code in a 10 mile radius from the", "Income Estimate (dollars)', 'Mean Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)',", "return [predictions, predictions_rf, predictions_gb] def create_predictions(predictions, df_x, label_divider): df_x[\"Pred House Price ET\"] =", "'Company Name']) min_max_normalization_list = ['Found_weighted', 'Median Age', 'Percent of People under 18 years", "[ 'Mean Household Income Estimate (dollars)', 'Mean Travel Time to Work Estimate (minutes)',", "> row[date_field]) & (df[date_field] < row[date_field] + np.timedelta64(time_window, 'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field] ==", "y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train, df_test, 
label_attr, feature_list): # Split-out", "corr = train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True) plt.show() top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)]", "'loss': 'ls'} model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) # transform the validation dataset predictions_gb", "Zipcode, the demographics of each of those zipcodes, economic data of the zipcode", "= build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7) predictions = make_predictions_model(models, df_test_x) df_test_x_with_pred = create_predictions(predictions, df_test_x,", "of the zipcode and the home prices at the Date Filed Time, the", "2 years after the date is filed. ''' ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df", "drop_columns, label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns) train_corr.shape # Correlation plot corr", "in wrangling.ipynb. 
Here we have every IPO in Silicon Valley, and each zip", "(dollars)', 'Percent of Population with no Health Insurance Coverage', 'Percent of People whose", "#run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20,", "I will use to train the model because it does have 'All Homes", "''' ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove row where", "attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], fit=stats.norm) # Get the fitted parameters used by the", "feature_cols = [ 'Distance to IPO_weighted', 'Found_weighted', 'Mean Household Income Estimate (dollars)', 'Mean", "AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y,", "plt.show() top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr = df[top_feature].corr() sns.heatmap(top_corr, annot=True)", "feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]]", "8)) top_corr = df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation between features'); plt.show() def view_feature_distributions(df): #", "# Predicting Median Price of All Homes in a Zipcode, and strucuturing data", "Employees_weighted'] ipo_final_with_date_filed_home = 
normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home,", "Females', 'Percent of People who are Hispanic', 'Percent of People who are White',", "'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.01, 'loss': 'ls'} model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y)", "Work Estimate (minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Per Capita Income", "of Males', 'Percent of Females', 'Percent of People who are Hispanic', 'Percent of", "in a 10 mile radius from the IPO Zipcode, the demographics of each", "df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list])", "top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr = df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation", "IPO_weighted'] quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number of Employees_weighted'] ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list)", "Date Filed Time, the Lockup Date, 1 Year after the Date is Filed", ":param location_field: field that denotes the zipcode demographic and economic data. 
Within radius", "run_ordinary_least_squares(df_x, df_y): model = sm.OLS(df_y, df_x) results = model.fit() print(results.summary()) plt.figure(figsize=(8, 5)) p", "= train_test_split(X, y, test_size=0.2, random_state=42) return X_train, X_validation, Y_train, Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df,", "home prices at the Date Filed Time, the Lockup Date, 1 Year after", "Homes 2 Years After Date Filed', 'Date Filed', 'Zipcode for Distance'] ipo_cols =", "model = sm.OLS(df_y, df_x) results = model.fit() print(results.summary()) plt.figure(figsize=(8, 5)) p = plt.scatter(x=results.fittedvalues,", "<reponame>apthomas/SF-home-price-prediction<filename>SF-home-price-prediction/src/learning.py<gh_stars>0 import pandas as pd import numpy as np import sklearn import matplotlib.pyplot", "df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test, 'All Homes 2 Years After Date Filed',", "'All Homes 2 Years After Date Filed', 0.5) #view_feature_distributions(df_train) feature_cols = [ 'Distance", "$24,999', 'Percent of Households with Income Greater than $200,000', 'Percent of Males', 'Percent", "sklearn.model_selection import KFold, cross_val_score, GridSearchCV from sklearn.pipeline import Pipeline import statsmodels.formula.api as sm", "the validation dataset predictions = model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy --> \",", "df_x) results = model.fit() print(results.summary()) plt.figure(figsize=(8, 5)) p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin", "df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test, 'All Homes 2 Years After Date Filed', feature_cols)", "GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection import KFold, cross_val_score, GridSearchCV from sklearn.pipeline import Pipeline", "under 18 years 
of age', 'Percent of People who are Asian', 'Percent of", "df: dataframe with ipo data :param date_field: field that will be used to", "scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor): # Predicting Median Price of", "filtered_rows = filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number of Employees'] = pd.to_numeric(filtered_rows['Number of Employees']) for", "1 df_x[\"Pred House Price GB Change\"] = predictions[2] /df_x[label_divider] - 1 return df_x", "Date Filed', 'Zipcode for Distance', 'Number of Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes 2 Years", "'o') # Create regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show()", "Estimate (minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Offer Amount_weighted', 'Per Capita", "from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor,", "df_x[\"Pred House Price GB\"] = predictions[2] df_x[\"Pred House Price ET Change\"] = predictions[0]", "Name']) min_max_normalization_list = ['Found_weighted', 'Median Age', 'Percent of People under 18 years of", "'All Homes Date Filed','All Homes 1 Year Before Date Filed', 'Zipcode for Distance',", "code in a 10 mile radius from the IPO Zipcode, the demographics of", "Date Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df, date_field, location_field, time_window,", "Date Filed', 'All Homes Lockup Expiration Date'], 'All Homes 2 Years After Date", "params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.01, 'loss': 'ls'} model_gb", "df_x[\"Pred House Price RF\"] = predictions[1] 
df_x[\"Pred House Price GB\"] = predictions[2] df_x[\"Pred", "%f (%f)\" % (name, cv_results.mean(), cv_results.std()) print(msg) def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed): #", "ipo_final_ecoded_df = pd.DataFrame(encoded_data) return ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number]) train_corr", "sns.heatmap(corr, annot=True) plt.show() top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr = df[top_feature].corr()", "algs_to_test, df_train_x, df_train_y): # Test options and evaluation metric using Root Mean Square", "and the home prices at the Date Filed Time, the Lockup Date, 1", "(%f)\" % (name, cv_results.mean(), cv_results.std()) print(msg) def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed): # prepare", "df[col] y = df[label_attr] plt.plot(x, y, 'o') # Create regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x,", "seaborn as sns from scipy import stats from sklearn import preprocessing from sklearn.preprocessing", "'Distance to IPO_weighted'] quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number of Employees_weighted'] ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home,", "min(results.fittedvalues) xmax = max(results.fittedvalues) plt.hlines(y=0, xmin=xmin * 0.9, xmax=xmax * 1.1, color='red', linestyle='--',", "# prepare the model predictions = models[0].predict(df_test_x) predictions_rf = models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x)", "pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results = [] names = [] for name, model", "ET Change\"] = predictions[0] / df_x[label_divider] - 1 df_x[\"Pred House Price RF Change\"]", "0', 'CIK', 'Company Name']) min_max_normalization_list = ['Found_weighted', 'Median 
Age', 'Percent of People under", "ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All", "index for i, col in enumerate(feature_list): # 3 plots here hence 1, 3", "of age', 'Percent of People 65 years and over', 'Percent of Males', 'Percent", "label_divider): df_x[\"Pred House Price ET\"] = predictions[0] df_x[\"Pred House Price RF\"] = predictions[1]", "predictions)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) # prepare the model model_rf", "is any 'NaN' value in column 'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns) return ipo_final_df def", "used for encoding and prediction. Likely 2 years. Decisions: weighted average of encoded", "'Percent of Population with no Health Insurance Coverage', 'Unemployment Rate', 'All Homes Date", "2 years. 
Decisions: weighted average of encoded historical data --> either I can", "'Number of Employees', 'Found', 'Distance to IPO'] drop_columns = ['Unnamed: 0', 'CIK', 'Company", "= min(results.fittedvalues) xmax = max(results.fittedvalues) plt.hlines(y=0, xmin=xmin * 0.9, xmax=xmax * 1.1, color='red',", "not have 'All Homes 2 Years After Date Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] #", "age', 'Percent of People who are Asian', 'Percent of People who are Black", "evaluation metric using Root Mean Square error method seed = 7 RMS =", "xmax = max(results.fittedvalues) plt.hlines(y=0, xmin=xmin * 0.9, xmax=xmax * 1.1, color='red', linestyle='--', lw=3)", "Mean Square error method seed = 7 RMS = 'neg_mean_squared_error' pipelines = []", "Amount_weighted', 'Per Capita Income Estimate (dollars)', 'Percent of Females', 'Percent of Households With", "model predictions = models[0].predict(df_test_x) predictions_rf = models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x) return [predictions, predictions_rf,", "plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') # Probablity", "of People under 18 years of age', 'Percent of People who are Asian',", "stats.norm.fit(df[attr]) # plot with the distribution plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f}", "i in range(0, len(ipo_cols)): dict[ipo_cols[i] + '_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data)", "i + 1) x = df[col] y = df[label_attr] plt.plot(x, y, 'o') #", "for name, model in pipelines: kfold = KFold(n_splits=num_folds, random_state=seed) cv_results = cross_val_score(model, df_train_x,", "prediction. Likely 2 years. 
Decisions: weighted average of encoded historical data --> either", "= KFold(n_splits=num_folds, random_state=seed) cv_results = cross_val_score(model, df_train_x, df_train_y, cv=kfold, scoring=RMS) results.append(cv_results) names.append(name) msg", "does not have 'All Homes 2 Years After Date Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()]", "it does have 'All Homes 2 Years After Date Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()]", "After Date Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] # dataset that I will use to", "Year After Date Filed', 'All Homes Lockup Expiration Date'], 'All Homes 2 Years", "range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results = [] names = [] for", "who are White', 'Percent of People whose Income in the Past 12 months", "Travel Time to Work Estimate (minutes)', 'Percent of Households with Income Greater than", "= df_train[label_attr] x_pred_test = df_test.loc[:, feature_list] X_train, X_validation, Y_train, Y_validation = train_test_split(X, y,", "with Income Greater than $200,000', 'Percent of Males', 'Percent of People 65 years", "\", model.score(df_validation_x, df_validation_y) * 100) params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,", "'Distance to IPO'] drop_columns = ['Unnamed: 0', 'CIK', 'Company Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data,", "0.5) #view_feature_distributions(df_train) feature_cols = [ 'Distance to IPO_weighted', 'Found_weighted', 'Mean Household Income Estimate", "Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes Date Filed','Number of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed']", "def main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number of Employees_weighted'], 
['Unnamed: 0',", "= ['Offer Amount', 'Number of Employees', 'Found', 'Distance to IPO'] drop_columns = ['Unnamed:", "df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor): # Predicting Median", "as sns from scipy import stats from sklearn import preprocessing from sklearn.preprocessing import", "each zip code in a 10 mile radius from the IPO Zipcode, the", "People 65 years and over', 'Percent of People under 18 years of age',", "scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo", "len(ipo_cols)): dict[ipo_cols[i] + '_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data) return ipo_final_ecoded_df def", "''' :param df: dataframe with ipo data :param date_field: field that will be", "'CIK', 'Company Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes Date Filed','Number of Employees'], drop_columns)", "'Percent of People who are Asian', 'Percent of People who are Black or", "as sm from datetime import datetime def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): ''' Import Final", "have every IPO in Silicon Valley, and each zip code in a 10", "df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols, ipo_cols): ''' :param", "(dollars)', 'Offer Amount_weighted', 'Per Capita Income Estimate (dollars)', 'Percent of Females', 'Percent of", "models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7) 
predictions = make_predictions_model(models, df_test_x) df_test_x_with_pred = create_predictions(predictions,", "'All Homes 2 Years After Date Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years, df_test_set_2_years", "* 1.1, color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs. residuals", "predictions_rf = models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x) return [predictions, predictions_rf, predictions_gb] def create_predictions(predictions, df_x,", "# Correlation plot corr = train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True) plt.show() top_feature =", "row where if there is any 'NaN' value in column 'A' #ipo_final_df =", "or African American', 'Percent of People who are Asian', 'Unemployment Rate', 'Mean Travel", "of Households With Income Less Than $24,999', 'Percent of Households with Income Greater", "def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): ''' Import Final IPO csv that was created in", "sklearn import ensemble, datasets, metrics from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error", "American', 'Percent of People who are Hispanic', 'Percent of People who are White',", "= predictions[1] / df_x[label_divider] - 1 df_x[\"Pred House Price GB Change\"] = predictions[2]", "df_validation_y) * 100) return [model, model_rf, model_gb] def make_predictions_model(models, df_test_x): # prepare the", "field that denotes the zipcode demographic and economic data. 
Within radius of 10", "+ np.timedelta64(time_window, 'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test'] =", "People who are White', 'Percent of People who are Black or African American',", "{:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') # Probablity plot fig =", "models[2].predict(df_test_x) return [predictions, predictions_rf, predictions_gb] def create_predictions(predictions, df_x, label_divider): df_x[\"Pred House Price ET\"]", "scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def create_test_train_set(df_ipo,", "dataset that I will use to train the model because it does have", "Years After Date Filed', 'Date Filed', 'Zipcode for Distance'] ipo_cols = ['Offer Amount',", "Black or African American', 'Percent of People who are Asian', 'Unemployment Rate', 'Mean", "df.hist(bins=25, figsize=(25, 20), grid=False); def view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25, 60)) # i: index", "People who are White', 'Percent of People whose Income in the Past 12", "'Mean Household Income Estimate (dollars)', 'Mean Travel Time to Work Estimate (minutes)', 'Median", "name, model in pipelines: kfold = KFold(n_splits=num_folds, random_state=seed) cv_results = cross_val_score(model, df_train_x, df_train_y,", "GB Change\"] = predictions[2] /df_x[label_divider] - 1 return df_x def main_build_predictions(): ipo_final_with_date_filed_home =", "dataset that does not have 'All Homes 2 Years After Date Filed' df_test_set_2_years", "GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) 
models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7) predictions", "(dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Population with no Health Insurance", "= MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list]", "Homes Date Filed','Number of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d')", "import statsmodels.formula.api as sm from datetime import datetime def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): '''", "for Distance', 2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False) if __name__ == \"__main__\": print(\"we are", "df_validation_y) * 100) # prepare the model model_rf = RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y)", "All Homes in a Zipcode, and strucuturing data to do so. 
df_ipo[ratio_label] =", "= plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin = min(results.fittedvalues) xmax = max(results.fittedvalues) plt.hlines(y=0, xmin=xmin *", "plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train, df_test, label_attr, feature_list): # Split-out validation dataset", "x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], fit=stats.norm) # Get the fitted parameters", "average of encoded historical data --> either I can define it or learn", "= ['Unnamed: 0', 'CIK', 'Company Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes Date Filed','Number", "df[(df[date_field] > row[date_field]) & (df[date_field] < row[date_field] + np.timedelta64(time_window, 'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field]", "> correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr = df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation between features'); plt.show()", "seed): # prepare the model model = ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x, df_train_y) # transform", "transform the validation dataset predictions = model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy -->", "Create regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train,", "scipy import stats from sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer", "* 100) # prepare the model model_rf = RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y) #", "1 Year After Date Filed', 'All Homes Lockup Expiration Date'], 'All 
Homes 2", "Homes Lockup Expiration Date'], 'All Homes 2 Years After Date Filed', 0.5) #view_feature_distributions(df_train)", "label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns) train_corr.shape # Correlation plot corr =", "Filed and 2 years after the date is filed. ''' ipo_final_df = pd.read_csv(datafile,", "Get the fitted parameters used by the function (mu, sigma) = stats.norm.fit(df[attr]) #", "feature_list] X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.2, random_state=42) return X_train, X_validation,", "\", model.score(df_validation_x, df_validation_y) * 100) # prepare the model model_rf = RandomForestRegressor(random_state=seed, n_estimators=100)", "Filed') #show_correlations_matrix(df_train, ['All Homes 1 Year After Date Filed', 'All Homes Lockup Expiration", "the zipcode and the home prices at the Date Filed Time, the Lockup", "prepare the model model_rf = RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y) # transform the validation", "used to create time windows :param location_field: field that denotes the zipcode demographic", "create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols, ipo_cols): ''' :param df: dataframe with ipo data", "min_max_list, quantile_scaler_list): scaler_min_max = MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list]", "df_x[\"Pred House Price GB Change\"] = predictions[2] /df_x[label_divider] - 1 return df_x def", "Income Less Than $24,999', 'Distance to IPO_weighted'] quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number of", "Y_validation = train_test_split(X, y, test_size=0.2, random_state=42) return X_train, X_validation, Y_train, Y_validation, x_pred_test def", "df_test_set_2_years 
def create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols, ipo_cols): ''' :param df: dataframe with", "sigma)], loc='best') plt.ylabel('Frequency') # Probablity plot fig = plt.figure() stats.probplot(df[attr], plot=plt) plt.show() def", "index=False) def create_encoding_historical_zipcode_data(data): feature_cols = [ 'Mean Household Income Estimate (dollars)', 'Mean Travel", "Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Population with", "value in column 'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns) return ipo_final_df def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list):", "Estimate (dollars)', 'Offer Amount_weighted', 'Per Capita Income Estimate (dollars)', 'Percent of Females', 'Percent", "df_train_y) models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7) predictions = make_predictions_model(models, df_test_x) df_test_x_with_pred =", "After Date Filed', 'Date Filed', 'Zipcode for Distance'] ipo_cols = ['Offer Amount', 'Number", "pipelines = [] for i in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results", "sigma) = stats.norm.fit(df[attr]) # plot with the distribution plt.legend(['Normal dist. 
($\\mu=$ {:.2f} and", "be used to create time windows :param location_field: field that denotes the zipcode", "import KFold, cross_val_score, GridSearchCV from sklearn.pipeline import Pipeline import statsmodels.formula.api as sm from", "of People who are White', 'Percent of People who are Black or African", "= {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.01, 'loss': 'ls'} model_gb =", "has been Below Poverty Level', 'Percent of Households With Income Less Than $24,999',", "Y_train, Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], fit=stats.norm) # Get the", "model = ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x, df_train_y) # transform the validation dataset predictions =", "validation dataset predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) *", "who are Black or African American', 'Percent of People who are Asian', 'Unemployment", "1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train, df_test, label_attr, feature_list): # Split-out validation", "'All Homes 2 Years After Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After", "= predictions[0] / df_x[label_divider] - 1 df_x[\"Pred House Price RF Change\"] = predictions[1]", "= filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] =", "economic data of the zipcode and the home prices at the Date Filed", "weighted average is by time differential from beginning of window to the end", "= df_ipo[df_ipo[label_attr].isna()] # dataset that 
I will use to train the model because", "Lockup Date, 1 Year after the Date is Filed and 2 years after", "'Date Filed', 'Zipcode for Distance'] ipo_cols = ['Offer Amount', 'Number of Employees', 'Found',", "# prepare the model model = ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x, df_train_y) # transform the", "to IPO_weighted', 'Found_weighted', 'Mean Household Income Estimate (dollars)', 'Mean Travel Time to Work", "as plt import seaborn as sns from scipy import stats from sklearn import", "filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window,", "p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin = min(results.fittedvalues) xmax = max(results.fittedvalues) plt.hlines(y=0, xmin=xmin", "the Date Filed Time, the Lockup Date, 1 Year after the Date is", "'Percent of Households with Income Greater than $200,000', 'Median Household Income Estimate (dollars)',", "GB\"] = predictions[2] df_x[\"Pred House Price ET Change\"] = predictions[0] / df_x[label_divider] -", "Income Estimate (dollars)', 'Mean Travel Time to Work Estimate (minutes)', 'Median Age', 'Median", "Filed','All Homes 1 Year Before Date Filed', 'Zipcode for Distance', 'Number of Employees_weighted']", "print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) return [model, model_rf, model_gb] def make_predictions_model(models,", "the validation dataset predictions_rf = model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y)", "kfold = KFold(n_splits=num_folds, random_state=seed) cv_results = cross_val_score(model, df_train_x, df_train_y, cv=kfold, scoring=RMS) results.append(cv_results) 
names.append(name)", "#print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) # prepare the", "features'); plt.show() def view_feature_distributions(df): # histograms df.hist(bins=25, figsize=(25, 20), grid=False); def view_residual_feature_plots(df, label_attr,", "People who are Black or African American', 'Percent of People who are Hispanic',", "plt.subplots(figsize=(12, 8)) top_corr = df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation between features'); plt.show() def view_feature_distributions(df):", "values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs. residuals plot\", fontsize=18) plt.grid(True) #plt.show() def run_k_folds(num_folds,", "train_corr = train_corr.drop(columns=drop_columns) train_corr.shape # Correlation plot corr = train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr,", "= pd.to_datetime(df[date_field], format='%Y-%m-%d') for index, row in df.iterrows(): dict = row.filter(feature_cols).to_dict() filtered_rows =", "Years After Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After Date Filed') df_train_x,", "Black or African American', 'Percent of People who are Hispanic', 'Percent of People", "who are Black or African American', 'Percent of People who are Hispanic', 'Percent", "at the Date Filed Time, the Lockup Date, 1 Year after the Date", "predictions = make_predictions_model(models, df_test_x) df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\",", "Change\"] = predictions[1] / df_x[label_divider] - 1 df_x[\"Pred House Price GB Change\"] =", "White', 'Percent of People who are Black or African American', 'Percent of People", "# Probablity plot fig = plt.figure() stats.probplot(df[attr], plot=plt) plt.show() 
def run_ordinary_least_squares(df_x, df_y): model", "Import Final IPO csv that was created in wrangling.ipynb. Here we have every", "and economic data. Within radius of 10 miles of IPO :param time_window: time", "fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs. residuals plot\", fontsize=18) plt.grid(True) #plt.show() def run_k_folds(num_folds, algs_to_test,", "ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x, df_train_y) # transform the validation dataset predictions = model.predict(df_validation_x) #print(predictions)", "($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') # Probablity plot fig", "scoring=RMS) results.append(cv_results) names.append(name) msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std()) print(msg)", "of People whose Income in the Past 12 months has been Below Poverty", "method seed = 7 RMS = 'neg_mean_squared_error' pipelines = [] for i in", "model_gb.fit(df_train_x, df_train_y) # transform the validation dataset predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy", "that I will use to train the model because it does have 'All", "does have 'All Homes 2 Years After Date Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] return", "/ df_x[label_divider] - 1 df_x[\"Pred House Price GB Change\"] = predictions[2] /df_x[label_divider] -", "df_train[label_attr] x_pred_test = df_test.loc[:, feature_list] X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.2,", "Date Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] # dataset that I will use to train", "'Median Household Income Estimate (dollars)', 'Offer Amount_weighted', 'Per Capita Income Estimate (dollars)', 'Percent", "pipelines: kfold = KFold(n_splits=num_folds, random_state=seed) cv_results = cross_val_score(model, df_train_x, df_train_y, cv=kfold, 
scoring=RMS) results.append(cv_results)", "remove row where if there is any 'NaN' value in column 'A' #ipo_final_df", "train_corr.drop(columns=drop_columns) train_corr.shape # Correlation plot corr = train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True) plt.show()", "to Work Estimate (minutes)', 'Percent of Households with Income Greater than $200,000', 'Median", "len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results = [] names = [] for name,", "ipo_final_df.drop(columns=drop_columns) return ipo_final_df def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max = MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform(", "Filed','Number of Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company Name']) min_max_normalization_list = ['Found_weighted', 'Median Age',", "Years After Date Filed', 0.5) #view_feature_distributions(df_train) feature_cols = [ 'Distance to IPO_weighted', 'Found_weighted',", "of Males', 'Percent of People 65 years and over', 'Percent of People under", "= ipo_final_df.drop(columns=drop_columns) return ipo_final_df def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max = MinMaxScaler() df_ipo[min_max_list] =", "y = df_train[label_attr] x_pred_test = df_test.loc[:, feature_list] X_train, X_validation, Y_train, Y_validation = train_test_split(X,", "quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number of Employees_weighted'] ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis", "in a Zipcode, and strucuturing data to do so. 
df_ipo[ratio_label] = df_ipo[label_attr] /", "are Black or African American', 'Percent of People who are Asian', 'Unemployment Rate',", "Filed', 'Zipcode for Distance', 'Number of Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes 2 Years After", "denotes the zipcode demographic and economic data. Within radius of 10 miles of", "of IPO :param time_window: time window used for encoding and prediction. Likely 2", "feature_list): plt.figure(figsize=(25, 60)) # i: index for i, col in enumerate(feature_list): # 3", "miles of IPO :param time_window: time window used for encoding and prediction. Likely", "metrics from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split", "defining it. weighted average is by time differential from beginning of window to", "* 100) params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.01, 'loss':", "df_x[\"Pred House Price ET\"] = predictions[0] df_x[\"Pred House Price RF\"] = predictions[1] df_x[\"Pred", "numpy as np import sklearn import matplotlib.pyplot as plt import seaborn as sns", ":param date_field: field that will be used to create time windows :param location_field:", "= stats.norm.fit(df[attr]) # plot with the distribution plt.legend(['Normal dist. 
($\\mu=$ {:.2f} and $\\sigma=$", "def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max = MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile =", "from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection", "= load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number of Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company Name'])", "def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], fit=stats.norm) # Get the fitted parameters used", "Households with Income Greater than $200,000', 'Median Household Income Estimate (dollars)', 'Mean Household", ":param df: dataframe with ipo data :param date_field: field that will be used", "predictions_rf, predictions_gb] def create_predictions(predictions, df_x, label_divider): df_x[\"Pred House Price ET\"] = predictions[0] df_x[\"Pred", "date_field, location_field, time_window, feature_cols, ipo_cols): ''' :param df: dataframe with ipo data :param", "row[location_field]] filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows", "African American', 'Percent of People who are Hispanic', 'Percent of People who are", "+ '_weighted'] = filtered_rows[\"time_weight\"].dot(filtered_rows[ipo_cols[i]]) encoded_data.append(dict) ipo_final_ecoded_df = pd.DataFrame(encoded_data) return ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns,", "- 1 return df_x def main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number", "Insurance 
Coverage', 'Percent of People whose Income in the Past 12 months has", "('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7)", "import datetime def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): ''' Import Final IPO csv that was", "are Asian', 'Unemployment Rate', 'Mean Travel Time to Work Estimate (minutes)', 'Percent of", "row[date_field] + np.timedelta64(time_window, 'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test']", "date_field: field that will be used to create time windows :param location_field: field", "color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs. residuals plot\", fontsize=18)", "than $200,000', 'Percent of Males', 'Percent of People 65 years and over', 'Percent", "= df[label_attr] plt.plot(x, y, 'o') # Create regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)))", "Employees', 'Found', 'Distance to IPO'] drop_columns = ['Unnamed: 0', 'CIK', 'Company Name'] ipo_final_with_date_filed_home", "algs_to_test[i][1]]))) results = [] names = [] for name, model in pipelines: kfold", "= df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation between features'); plt.show() def view_feature_distributions(df): # histograms df.hist(bins=25,", "Males', 'Percent of Females', 'Percent of People who are Hispanic', 'Percent of People", "filtered_rows['Number of Employees'] = pd.to_numeric(filtered_rows['Number of Employees']) for i in range(0, len(ipo_cols)): dict[ipo_cols[i]", "function (mu, sigma) = stats.norm.fit(df[attr]) # plot with the distribution plt.legend(['Normal dist. 
($\\mu=$", "$\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') # Probablity plot fig = plt.figure() stats.probplot(df[attr],", "the function (mu, sigma) = stats.norm.fit(df[attr]) # plot with the distribution plt.legend(['Normal dist.", "model.fit() print(results.summary()) plt.figure(figsize=(8, 5)) p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin = min(results.fittedvalues) xmax", "# Split-out validation dataset X = df_train.loc[:, feature_list] y = df_train[label_attr] x_pred_test =", "has been Below Poverty Level', 'Percent of Population with no Health Insurance Coverage',", "from beginning of window to the end :return: ''' encoded_data = [] df[date_field]", "n_estimators=100) model.fit(df_train_x, df_train_y) # transform the validation dataset predictions = model.predict(df_validation_x) #print(predictions) #print(df_test_y)", "who are Hispanic', 'Percent of People who are White', 'Percent of People who", "House Price ET\"] = predictions[0] df_x[\"Pred House Price RF\"] = predictions[1] df_x[\"Pred House", "-row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows = filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number of Employees']", "model_rf, model_gb] def make_predictions_model(models, df_test_x): # prepare the model predictions = models[0].predict(df_test_x) predictions_rf", "statsmodels.formula.api as sm from datetime import datetime def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): ''' Import", "results = model.fit() print(results.summary()) plt.figure(figsize=(8, 5)) p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin =", "Males', 'Percent of People 65 years and over', 'Percent of People under 18", "the validation dataset predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy --> \", 
model.score(df_validation_x, df_validation_y)", "''' Import Final IPO csv that was created in wrangling.ipynb. Here we have", "Rate', 'All Homes Date Filed','All Homes 1 Year Before Date Filed', 'Zipcode for", "< row[date_field] + np.timedelta64(time_window, 'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index = filtered_rows.index.map(str)", "of encoded historical data --> either I can define it or learn it,", "ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes Date Filed','Number of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed'] =", "'Percent of People whose Income in the Past 12 months has been Below", "Insurance Coverage', 'Unemployment Rate', 'All Homes Date Filed','All Homes 1 Year Before Date", "df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] # dataset that I will use to train the model", "import seaborn as sns from scipy import stats from sklearn import preprocessing from", "cv_results = cross_val_score(model, df_train_x, df_train_y, cv=kfold, scoring=RMS) results.append(cv_results) names.append(name) msg = \"%s: %f", "shuffle from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor,", "plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train, df_test, label_attr, feature_list):", "plt.ylabel('prices') plt.show() def prep_train_validation_test_data(df_train, df_test, label_attr, feature_list): # Split-out validation dataset X =", "edgecolor='k') xmin = min(results.fittedvalues) xmax = max(results.fittedvalues) plt.hlines(y=0, xmin=xmin * 0.9, xmax=xmax *", "or African American', 'Percent of People who are Hispanic', 'Percent of People who", "= ['Offer Amount_weighted', 'Number of Employees_weighted'] ipo_final_with_date_filed_home = 
normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis =", "fig = plt.figure() stats.probplot(df[attr], plot=plt) plt.show() def run_ordinary_least_squares(df_x, df_y): model = sm.OLS(df_y, df_x)", "Rate', 'Mean Travel Time to Work Estimate (minutes)', 'Percent of Households with Income", "'ls'} model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) # transform the validation dataset predictions_gb =", "to Work Estimate (minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Per Capita", "distribution plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') #", "= sm.OLS(df_y, df_x) results = model.fit() print(results.summary()) plt.figure(figsize=(8, 5)) p = plt.scatter(x=results.fittedvalues, y=results.resid,", "Homes Date Filed') #show_correlations_matrix(df_train, ['All Homes 1 Year After Date Filed', 'All Homes", "of 10 miles of IPO :param time_window: time window used for encoding and", "train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True) plt.show() top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12, 8))", "ratio', 'All Homes Date Filed') #show_correlations_matrix(df_train, ['All Homes 1 Year After Date Filed',", "economic data. 
Within radius of 10 miles of IPO :param time_window: time window", "model_rf = RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y) # transform the validation dataset predictions_rf =", "1 Year Before Date Filed', 'All Homes 2 Years After Date Filed', 'Date", "Expiration Date'], 'All Homes 2 Years After Date Filed', 0.5) #view_feature_distributions(df_train) feature_cols =", "After Date Filed', 'All Homes Lockup Expiration Date'], 'All Homes 2 Years After", "predictions[0] df_x[\"Pred House Price RF\"] = predictions[1] df_x[\"Pred House Price GB\"] = predictions[2]", "average is by time differential from beginning of window to the end :return:", "Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After Date Filed') df_train_x, df_validation_x, df_train_y, df_validation_y,", "['Unnamed: 0', 'CIK', 'Company Name']) min_max_normalization_list = ['Found_weighted', 'Median Age', 'Percent of People", "# dataset that I will use to train the model because it does", "define it or learn it, but here I am defining it. weighted average", "whose Income in the Past 12 months has been Below Poverty Level', 'Percent", "Health Insurance Coverage', 'Unemployment Rate', 'All Homes Date Filed','All Homes 1 Year Before", "cv_results.std()) print(msg) def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed): # prepare the model model =", "strucuturing data to do so. 
df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor] # dataset that", "Years After Date Filed') df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test, 'All", "y, 'o') # Create regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices')", "def create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols, ipo_cols): ''' :param df: dataframe with ipo", "'Median Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Females',", "radius from the IPO Zipcode, the demographics of each of those zipcodes, economic", "2 Years After Date Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df,", "# Create regression line plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x))) plt.title(col) plt.xlabel(col) plt.ylabel('prices') plt.show() def", "stats.probplot(df[attr], plot=plt) plt.show() def run_ordinary_least_squares(df_x, df_y): model = sm.OLS(df_y, df_x) results = model.fit()", "(mu, sigma) = stats.norm.fit(df[attr]) # plot with the distribution plt.legend(['Normal dist. 
($\\mu=$ {:.2f}", "Income Greater than $200,000', 'Percent of Males', 'Percent of People 65 years and", "'Y')) filtered_rows = filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number of Employees'] = pd.to_numeric(filtered_rows['Number of Employees'])", "Age', 'Median Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of", "ET\"] = predictions[0] df_x[\"Pred House Price RF\"] = predictions[1] df_x[\"Pred House Price GB\"]", "than $200,000', 'Median Household Income Estimate (dollars)', 'Mean Household Income Estimate (dollars)', 'Per", "MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] =", "if there is any 'NaN' value in column 'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns) return", "it, but here I am defining it. weighted average is by time differential", "corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr = df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation between features');", "zipcodes, economic data of the zipcode and the home prices at the Date", "created in wrangling.ipynb. Here we have every IPO in Silicon Valley, and each", "ipo data :param date_field: field that will be used to create time windows", "grid=False); def view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25, 60)) # i: index for i, col", "model because it does have 'All Homes 2 Years After Date Filed' df_train_set_2_years", "we have every IPO in Silicon Valley, and each zip code in a", "'Percent of Population with no Health Insurance Coverage', 'Percent of People whose Income", "plot with the distribution plt.legend(['Normal dist. 
($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)],", "Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] # dataset that I will use to train the", "time_window: time window used for encoding and prediction. Likely 2 years. Decisions: weighted", "Split-out validation dataset X = df_train.loc[:, feature_list] y = df_train[label_attr] x_pred_test = df_test.loc[:,", "/ df_x[label_divider] - 1 df_x[\"Pred House Price RF Change\"] = predictions[1] / df_x[label_divider]", "Below Poverty Level', 'Percent of Households With Income Less Than $24,999', 'Distance to", "'All Homes Date Filed') #show_correlations_matrix(df_train, ['All Homes 1 Year After Date Filed', 'All", "* 0.9, xmax=xmax * 1.1, color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15)", "model_rf.fit(df_train_x, df_train_y) # transform the validation dataset predictions_rf = model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy", "Date Filed','Number of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df", "Date Filed', 'All Homes 2 Years After Date Filed', 'Date Filed', 'Zipcode for", "filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows = filtered_rows.replace(['--'], [1],", "'Median Household Income Estimate (dollars)', 'Mean Household Income Estimate (dollars)', 'Per Capita Income", "/ df_ipo[ratio_divisor] # dataset that does not have 'All Homes 2 Years After", "'Unemployment Rate', 'All Homes Date Filed','All Homes 1 Year Before Date Filed', 'Zipcode", "Health Insurance Coverage', 'Percent of People whose Income in the Past 12 months", "plt.hlines(y=0, xmin=xmin * 0.9, 
xmax=xmax * 1.1, color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted values\", fontsize=15)", "Date Filed','Number of Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company Name']) min_max_normalization_list = ['Found_weighted', 'Median", "of People under 18 years of age', 'Percent of People 65 years and", "'Found_weighted', 'Mean Household Income Estimate (dollars)', 'Mean Travel Time to Work Estimate (minutes)',", "'Per Capita Income Estimate (dollars)', 'Percent of Females', 'Percent of Households With Income", "import ensemble, datasets, metrics from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error from", "train the model because it does have 'All Homes 2 Years After Date", "= load_processed_ipo_data(data, ['All Homes Date Filed','Number of Employees'], drop_columns) #ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date", "it or learn it, but here I am defining it. weighted average is", "'neg_mean_squared_error' pipelines = [] for i in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]])))", "# remove row where if there is any 'NaN' value in column 'A'", "'Percent of Households With Income Less Than $24,999', 'Distance to IPO_weighted'] quantile_scaler_normalization_list =", "create_predictions(predictions, df_x, label_divider): df_x[\"Pred House Price ET\"] = predictions[0] df_x[\"Pred House Price RF\"]", "of Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company Name']) min_max_normalization_list = ['Found_weighted', 'Median Age', 'Percent", "that was created in wrangling.ipynb. 
Here we have every IPO in Silicon Valley,", "predictions[2] /df_x[label_divider] - 1 return df_x def main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes", "500, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.01, 'loss': 'ls'} model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x,", "are Hispanic', 'Percent of People who are White', 'Percent of People who are", "Income in the Past 12 months has been Below Poverty Level', 'Percent of", "Price ET\"] = predictions[0] df_x[\"Pred House Price RF\"] = predictions[1] df_x[\"Pred House Price", "Income Estimate (dollars)', 'Percent of Population with no Health Insurance Coverage', 'Percent of", "= df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns) train_corr.shape # Correlation plot corr = train_corr.corr() plt.subplots(figsize=(20,", "Date Filed','All Homes 1 Year Before Date Filed', 'All Homes 2 Years After", "with the distribution plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best')", "Y_train, Y_validation = train_test_split(X, y, test_size=0.2, random_state=42) return X_train, X_validation, Y_train, Y_validation, x_pred_test", "so. 
df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor] # dataset that does not have 'All", "Value ratio', 'All Homes Date Filed') #show_correlations_matrix(df_train, ['All Homes 1 Year After Date", "create time windows :param location_field: field that denotes the zipcode demographic and economic", "Below Poverty Level', 'Percent of Population with no Health Insurance Coverage', 'Unemployment Rate',", "predictions = model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) *", "col in enumerate(feature_list): # 3 plots here hence 1, 3 plt.subplot(10, 6, i", "(dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Females', 'Percent of Households With", "RF Change\"] = predictions[1] / df_x[label_divider] - 1 df_x[\"Pred House Price GB Change\"]", "df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x,", "pandas as pd import numpy as np import sklearn import matplotlib.pyplot as plt", "of Households with Income Greater than $200,000', 'Percent of Males', 'Percent of People", "'Found', 'Distance to IPO'] drop_columns = ['Unnamed: 0', 'CIK', 'Company Name'] ipo_final_with_date_filed_home =", "seed = 7 RMS = 'neg_mean_squared_error' pipelines = [] for i in range(0,", "df_train_y) # transform the validation dataset predictions = model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions))", "datasets, metrics from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error from sklearn.model_selection import", "Coverage', 'Percent of People whose Income in the Past 12 months has been", "Households with Income Greater 
than $200,000', 'Percent of Males', 'Percent of People 65", "Time to Work Estimate (minutes)', 'Percent of Households with Income Greater than $200,000',", "= [] names = [] for name, model in pipelines: kfold = KFold(n_splits=num_folds,", "With Income Less Than $24,999', 'Percent of Households with Income Greater than $200,000',", "GridSearchCV from sklearn.pipeline import Pipeline import statsmodels.formula.api as sm from datetime import datetime", "as pd import numpy as np import sklearn import matplotlib.pyplot as plt import", "= [ 'Distance to IPO_weighted', 'Found_weighted', 'Mean Household Income Estimate (dollars)', 'Mean Travel", "Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data): feature_cols = [ 'Mean Household Income", "of Households With Income Less Than $24,999', 'Distance to IPO_weighted'] quantile_scaler_normalization_list = ['Offer", "of those zipcodes, economic data of the zipcode and the home prices at", "0.01, 'loss': 'ls'} model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) # transform the validation dataset", "+ 1) x = df[col] y = df[label_attr] plt.plot(x, y, 'o') # Create", "Price RF\"] = predictions[1] df_x[\"Pred House Price GB\"] = predictions[2] df_x[\"Pred House Price", "Households With Income Less Than $24,999', 'Percent of Households with Income Greater than", "time_window, feature_cols, ipo_cols): ''' :param df: dataframe with ipo data :param date_field: field", "plt.title('Correlation between features'); plt.show() def view_feature_distributions(df): # histograms df.hist(bins=25, figsize=(25, 20), grid=False); def", "metric using Root Mean Square error method seed = 7 RMS = 'neg_mean_squared_error'", "return df_x def main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number of 
Employees_weighted'],", "Households With Income Less Than $24,999', 'Distance to IPO_weighted'] quantile_scaler_normalization_list = ['Offer Amount_weighted',", "i in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results = [] names =", "= 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows = filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number of Employees'] = pd.to_numeric(filtered_rows['Number", "= create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for Distance', 2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False) if", "Income Less Than $24,999', 'Percent of Households with Income Greater than $200,000', 'Percent", "ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns) train_corr.shape #", "lw=3) plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs. 
residuals plot\", fontsize=18) plt.grid(True) #plt.show()", "= prep_train_validation_test_data(df_train, df_test, 'All Homes 2 Years After Date Filed', feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y)", "Years After Date Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] # dataset that I will use", "column 'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns) return ipo_final_df def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max =", "of Population with no Health Insurance Coverage', 'Unemployment Rate', 'All Homes Date Filed','All", "'All Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data): feature_cols = [ 'Mean Household", "= df[col] y = df[label_attr] plt.plot(x, y, 'o') # Create regression line plt.plot(np.unique(x),", "scaler_min_max = MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list])", "'Mean Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Population", "Years After Date Filed', '2 Year Home Value ratio', 'All Homes Date Filed')", "After Date Filed', feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM',", "been Below Poverty Level', 'Percent of Population with no Health Insurance Coverage', 'Unemployment", "[] names = [] for name, model in pipelines: kfold = KFold(n_splits=num_folds, random_state=seed)", "[model, model_rf, model_gb] def make_predictions_model(models, df_test_x): # prepare the model predictions = models[0].predict(df_test_x)", "After Date Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] 
return df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df, date_field, location_field,", "Time, the Lockup Date, 1 Year after the Date is Filed and 2", "Years After Date Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df, date_field,", "'Median Age', 'Median Household Income Estimate (dollars)', 'Offer Amount_weighted', 'Per Capita Income Estimate", "'All Homes Lockup Expiration Date'], 'All Homes 2 Years After Date Filed', 0.5)", "ipo_cols): ''' :param df: dataframe with ipo data :param date_field: field that will", "= ['Found_weighted', 'Median Age', 'Percent of People under 18 years of age', 'Percent", "in column 'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns) return ipo_final_df def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max", "[ 'Distance to IPO_weighted', 'Found_weighted', 'Mean Household Income Estimate (dollars)', 'Mean Travel Time", "each of those zipcodes, economic data of the zipcode and the home prices", "fit=stats.norm) # Get the fitted parameters used by the function (mu, sigma) =", "in pipelines: kfold = KFold(n_splits=num_folds, random_state=seed) cv_results = cross_val_score(model, df_train_x, df_train_y, cv=kfold, scoring=RMS)", "1 return df_x def main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number of", "Filed'], errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for Distance', 2, feature_cols,", "df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data): feature_cols = [ 'Mean Household Income Estimate (dollars)', 'Mean", "Homes 1 Year Before Date Filed', 'All Homes 2 Years After Date Filed',", "because 
it does have 'All Homes 2 Years After Date Filed' df_train_set_2_years =", "RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection import KFold, cross_val_score, GridSearchCV from sklearn.pipeline import", "pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for Distance', 2,", "Filed', 'All Homes Lockup Expiration Date'], 'All Homes 2 Years After Date Filed',", "and prediction. Likely 2 years. Decisions: weighted average of encoded historical data -->", ":param time_window: time window used for encoding and prediction. Likely 2 years. Decisions:", "Income Estimate (dollars)', 'Offer Amount_weighted', 'Per Capita Income Estimate (dollars)', 'Percent of Females',", "pd import numpy as np import sklearn import matplotlib.pyplot as plt import seaborn", "window to the end :return: ''' encoded_data = [] df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d')", "1, 3 plt.subplot(10, 6, i + 1) x = df[col] y = df[label_attr]", "Estimate (dollars)', 'Mean Household Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent", "label_attr, feature_list): plt.figure(figsize=(25, 60)) # i: index for i, col in enumerate(feature_list): #", "'Percent of Households with Income Greater than $200,000', 'Percent of Males', 'Percent of", "row in df.iterrows(): dict = row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field] > row[date_field]) & (df[date_field]", "House Price ET Change\"] = predictions[0] / df_x[label_divider] - 1 df_x[\"Pred House Price", "train_corr = df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns) train_corr.shape # Correlation plot corr = train_corr.corr()", "of People who are Asian', 'Percent of People who are Black or African", "was created in wrangling.ipynb. 
Here we have every IPO in Silicon Valley, and", "dataset predictions = model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y)", "feature_cols = [ 'Mean Household Income Estimate (dollars)', 'Mean Travel Time to Work", "dict = row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field] > row[date_field]) & (df[date_field] < row[date_field] +", "enumerate(feature_list): # 3 plots here hence 1, 3 plt.subplot(10, 6, i + 1)", "df_test_x) df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data):", "sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection import", "who are Hispanic', 'Percent of People who are White', 'Percent of People whose", "results = [] names = [] for name, model in pipelines: kfold =", "have 'All Homes 2 Years After Date Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] # dataset", "run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y): # Test options and evaluation metric using Root Mean", "= pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for Distance',", "= df_train.loc[:, feature_list] y = df_train[label_attr] x_pred_test = df_test.loc[:, feature_list] X_train, X_validation, Y_train,", "those zipcodes, economic data of the zipcode and the home prices at the", "i: index for i, col in enumerate(feature_list): # 3 plots here hence 1,", "mile radius from the IPO Zipcode, the demographics of each of those zipcodes,", "= 
\"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std()) print(msg) def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y,", "transform the validation dataset predictions_rf = model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy --> \", model.score(df_validation_x,", "= create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years After Date Filed', '2 Year Home Value", "IPO :param time_window: time window used for encoding and prediction. Likely 2 years.", "X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.2, random_state=42) return X_train, X_validation, Y_train, Y_validation,", "0.9, xmax=xmax * 1.1, color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted", "df_ipo[ratio_divisor] # dataset that does not have 'All Homes 2 Years After Date", "X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.2, random_state=42) return X_train, X_validation, Y_train,", "'Percent of People under 18 years of age', 'Percent of People 65 years", "Filed', 'Date Filed', 'Zipcode for Distance'] ipo_cols = ['Offer Amount', 'Number of Employees',", "'All Homes 2 Years After Date Filed', feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR',", "def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor): # Predicting Median Price of All Homes in", "dist. 
($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') # Probablity plot", "df_validation_y, 7) predictions = make_predictions_model(models, df_test_x) df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All Homes Date", "= RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y) # transform the validation dataset predictions_rf = model_rf.predict(df_validation_x)", "= plt.figure() stats.probplot(df[attr], plot=plt) plt.show() def run_ordinary_least_squares(df_x, df_y): model = sm.OLS(df_y, df_x) results", "create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor): # Predicting Median Price of All Homes in a", "Likely 2 years. Decisions: weighted average of encoded historical data --> either I", "datetime import datetime def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): ''' Import Final IPO csv that", "used by the function (mu, sigma) = stats.norm.fit(df[attr]) # plot with the distribution", "feature_cols, ipo_cols): ''' :param df: dataframe with ipo data :param date_field: field that", "sns from scipy import stats from sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler,", "Filed') df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test, 'All Homes 2 Years", "'Percent of People who are White', 'Percent of People who are Black or", "either I can define it or learn it, but here I am defining", "csv that was created in wrangling.ipynb. 
Here we have every IPO in Silicon", "After Date Filed', '2 Year Home Value ratio', 'All Homes Date Filed') #show_correlations_matrix(df_train,", "df_train_y, cv=kfold, scoring=RMS) results.append(cv_results) names.append(name) msg = \"%s: %f (%f)\" % (name, cv_results.mean(),", "= filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number of Employees'] = pd.to_numeric(filtered_rows['Number of Employees']) for i", "age', 'Percent of People 65 years and over', 'Percent of Males', 'Percent of", "= scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return", "plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], fit=stats.norm) # Get the fitted parameters used by", "Predicting Median Price of All Homes in a Zipcode, and strucuturing data to", "predictions_gb = models[2].predict(df_test_x) return [predictions, predictions_rf, predictions_gb] def create_predictions(predictions, df_x, label_divider): df_x[\"Pred House", "Homes 2 Years After Date Filed' df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()] # dataset that I", "df_train_x, df_train_y): # Test options and evaluation metric using Root Mean Square error", "for encoding and prediction. Likely 2 years. 
Decisions: weighted average of encoded historical", "cv=kfold, scoring=RMS) results.append(cv_results) names.append(name) msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std())", "test_size=0.2, random_state=42) return X_train, X_validation, Y_train, Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9))", "where if there is any 'NaN' value in column 'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns)", "and 2 years after the date is filed. ''' ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\")", "y, test_size=0.2, random_state=42) return X_train, X_validation, Y_train, Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10,", "xmax=xmax * 1.1, color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs.", "create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for Distance', 2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False) if __name__", "import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor", "demographic and economic data. 
Within radius of 10 miles of IPO :param time_window:", "= 0)) df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years After Date Filed',", "'Number of Employees_weighted'] ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train, df_test", "import stats from sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer from", "'min_samples_split': 2, 'learning_rate': 0.01, 'loss': 'ls'} model_gb = ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) # transform", "Hispanic', 'Percent of People who are White', 'Percent of People whose Income in", "zipcode demographic and economic data. Within radius of 10 miles of IPO :param", "return df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols, ipo_cols): ''' :param df:", "are Black or African American', 'Percent of People who are Hispanic', 'Percent of", "top_corr = df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation between features'); plt.show() def view_feature_distributions(df): # histograms", "using Root Mean Square error method seed = 7 RMS = 'neg_mean_squared_error' pipelines", "predictions[1] df_x[\"Pred House Price GB\"] = predictions[2] df_x[\"Pred House Price ET Change\"] =", "with no Health Insurance Coverage', 'Unemployment Rate', 'All Homes Date Filed','All Homes 1", "in the Past 12 months has been Below Poverty Level', 'Percent of Population", "df_test_x = prep_train_validation_test_data(df_train, df_test, 'All Homes 2 Years After Date Filed', feature_cols) #run_ordinary_least_squares(df_train_x,", "df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] = 
scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def", "print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) params = {'n_estimators': 500,", "encoding and prediction. Likely 2 years. Decisions: weighted average of encoded historical data", "plt import seaborn as sns from scipy import stats from sklearn import preprocessing", "Before Date Filed', 'All Homes 2 Years After Date Filed', 'Date Filed', 'Zipcode", "Less Than $24,999', 'Percent of Households with Income Greater than $200,000', 'Percent of", "'Offer Amount_weighted', 'Per Capita Income Estimate (dollars)', 'Percent of Females', 'Percent of Households", "from sklearn.pipeline import Pipeline import statsmodels.formula.api as sm from datetime import datetime def", "# Get the fitted parameters used by the function (mu, sigma) = stats.norm.fit(df[attr])", "= row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field] > row[date_field]) & (df[date_field] < row[date_field] + np.timedelta64(time_window,", "'Zipcode for Distance', 2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False) if __name__ == \"__main__\": print(\"we", "= df_ipo[label_attr] / df_ipo[ratio_divisor] # dataset that does not have 'All Homes 2", "X = df_train.loc[:, feature_list] y = df_train[label_attr] x_pred_test = df_test.loc[:, feature_list] X_train, X_validation,", "by time differential from beginning of window to the end :return: ''' encoded_data", "wrangling.ipynb. 
Here we have every IPO in Silicon Valley, and each zip code", "= models[2].predict(df_test_x) return [predictions, predictions_rf, predictions_gb] def create_predictions(predictions, df_x, label_divider): df_x[\"Pred House Price", "plt.figure() stats.probplot(df[attr], plot=plt) plt.show() def run_ordinary_least_squares(df_x, df_y): model = sm.OLS(df_y, df_x) results =", "import numpy as np import sklearn import matplotlib.pyplot as plt import seaborn as", "df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test, 'All Homes 2 Years After", "#ipo_final_df = ipo_final_df.drop(columns=drop_columns) return ipo_final_df def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max = MinMaxScaler() df_ipo[min_max_list]", "plt.figure(figsize=(25, 60)) # i: index for i, col in enumerate(feature_list): # 3 plots", "from the IPO Zipcode, the demographics of each of those zipcodes, economic data", "fontsize=15) plt.title(\"Fitted vs. 
residuals plot\", fontsize=18) plt.grid(True) #plt.show() def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y):", "100) params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.01, 'loss': 'ls'}", "model.score(df_validation_x, df_validation_y) * 100) params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate':", "predictions = models[0].predict(df_test_x) predictions_rf = models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x) return [predictions, predictions_rf, predictions_gb]", "data --> either I can define it or learn it, but here I", "Past 12 months has been Below Poverty Level', 'Percent of Population with no", "annot=True) plt.show() top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr = df[top_feature].corr() sns.heatmap(top_corr,", "predictions_gb)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) return [model, model_rf, model_gb] def", "def run_ordinary_least_squares(df_x, df_y): model = sm.OLS(df_y, df_x) results = model.fit() print(results.summary()) plt.figure(figsize=(8, 5))", "df[top_feature].corr() sns.heatmap(top_corr, annot=True) plt.title('Correlation between features'); plt.show() def view_feature_distributions(df): # histograms df.hist(bins=25, figsize=(25,", "vs. 
residuals plot\", fontsize=18) plt.grid(True) #plt.show() def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y): # Test", "from datetime import datetime def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): ''' Import Final IPO csv", "df_validation_x, df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test, 'All Homes 2 Years After Date", "and evaluation metric using Root Mean Square error method seed = 7 RMS", "feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False) if __name__ == \"__main__\": print(\"we are learning\") create_encoding_historical_zipcode_data('../data/processed/df_ipo_all.csv') #main_build_predictions()", "'Zipcode for Distance'] ipo_cols = ['Offer Amount', 'Number of Employees', 'Found', 'Distance to", "a Zipcode, and strucuturing data to do so. df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor]", "print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) return [model, model_rf, model_gb]", "= cross_val_score(model, df_train_x, df_train_y, cv=kfold, scoring=RMS) results.append(cv_results) names.append(name) msg = \"%s: %f (%f)\"", "df_test_x, 'All Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data): feature_cols = [ 'Mean", "'Date Filed', 'Zipcode for Distance', 2, feature_cols, ipo_cols) ipo_final_ecoded_df.to_csv(\"../data/processed/df_ipo_encoded_test.csv\", index=False) if __name__ ==", "predictions_rf)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) params = {'n_estimators': 500, 'max_depth':", "Years After Date Filed', feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', 
AdaBoostRegressor())],['ScaledGBM',", "IPO csv that was created in wrangling.ipynb. Here we have every IPO in", "Filed', 0.5) #view_feature_distributions(df_train) feature_cols = [ 'Distance to IPO_weighted', 'Found_weighted', 'Mean Household Income", "'Percent of People who are Hispanic', 'Percent of People who are White', 'Percent", "[] df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d') for index, row in df.iterrows(): dict = row.filter(feature_cols).to_dict()", "= predictions[0] df_x[\"Pred House Price RF\"] = predictions[1] df_x[\"Pred House Price GB\"] =", "People who are Asian', 'Unemployment Rate', 'Mean Travel Time to Work Estimate (minutes)',", "plt.grid(True) #plt.show() def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y): # Test options and evaluation metric", "quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years After", "plt.title(\"Fitted vs. 
residuals plot\", fontsize=18) plt.grid(True) #plt.show() def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y): #", "radius of 10 miles of IPO :param time_window: time window used for encoding", "in the Past 12 months has been Below Poverty Level', 'Percent of Households", "LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]] #run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y) models = build_models(df_train_x,", "are White', 'Percent of People whose Income in the Past 12 months has", "= [] for i in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results =", "filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"]", "time windows :param location_field: field that denotes the zipcode demographic and economic data.", "Coverage', 'Unemployment Rate', 'All Homes Date Filed','All Homes 1 Year Before Date Filed',", "sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor,", "House Price GB\"] = predictions[2] df_x[\"Pred House Price ET Change\"] = predictions[0] /", "{'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.01, 'loss': 'ls'} model_gb = ensemble.GradientBoostingRegressor(**params)", "'Percent of Females', 'Percent of Households With Income Less Than $24,999', 'Percent of", "ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection import KFold, cross_val_score, GridSearchCV from sklearn.pipeline import Pipeline import", "sns.distplot(df[attr], fit=stats.norm) # Get the fitted parameters 
used by the function (mu, sigma)", "Date Filed') #show_correlations_matrix(df_train, ['All Homes 1 Year After Date Filed', 'All Homes Lockup", "#ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode", "for Distance', 'Number of Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes 2 Years After Date Filed',", "that does not have 'All Homes 2 Years After Date Filed' df_test_set_2_years =", "model model_rf = RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y) # transform the validation dataset predictions_rf", "for index, row in df.iterrows(): dict = row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field] > row[date_field])", "import preprocessing from sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer from sklearn import ensemble, datasets,", "np.timedelta64(time_window, 'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field]", "Zipcode, and strucuturing data to do so. df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor] #", "Correlation plot corr = train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True) plt.show() top_feature = corr.index[abs(corr[label_attr]", "over', 'Percent of People under 18 years of age', 'Percent of People who", "filed. 
''' ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove row", "plot fig = plt.figure() stats.probplot(df[attr], plot=plt) plt.show() def run_ordinary_least_squares(df_x, df_y): model = sm.OLS(df_y,", "= df[(df[date_field] > row[date_field]) & (df[date_field] < row[date_field] + np.timedelta64(time_window, 'Y'))] filtered_rows =", "residuals plot\", fontsize=18) plt.grid(True) #plt.show() def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y): # Test options", "loc='best') plt.ylabel('Frequency') # Probablity plot fig = plt.figure() stats.probplot(df[attr], plot=plt) plt.show() def run_ordinary_least_squares(df_x,", "build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7) predictions = make_predictions_model(models, df_test_x) df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All", "import matplotlib.pyplot as plt import seaborn as sns from scipy import stats from", "drop_nan_columns, drop_columns): ''' Import Final IPO csv that was created in wrangling.ipynb. 
Here", "'Number of Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes 2 Years After Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All", "to Work Estimate (minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Offer Amount_weighted',", "plt.show() def run_ordinary_least_squares(df_x, df_y): model = sm.OLS(df_y, df_x) results = model.fit() print(results.summary()) plt.figure(figsize=(8,", "12 months has been Below Poverty Level', 'Percent of Households With Income Less", "/df_x[label_divider] - 1 return df_x def main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date", "from scipy import stats from sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler, RobustScaler,", "12 months has been Below Poverty Level', 'Percent of Population with no Health", "the IPO Zipcode, the demographics of each of those zipcodes, economic data of", "Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data): feature_cols = [ 'Mean Household Income Estimate", "x = df[col] y = df[label_attr] plt.plot(x, y, 'o') # Create regression line", "in range(0, len(algs_to_test)): pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]]))) results = [] names = []", "'Percent of People under 18 years of age', 'Percent of People who are", "Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes 2 Years After Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2", "years after the date is filed. 
''' ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df =", "#print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) # prepare", "& (df[date_field] < row[date_field] + np.timedelta64(time_window, 'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]] filtered_rows.index", "have 'All Homes 2 Years After Date Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years,", "def view_feature_distributions(df): # histograms df.hist(bins=25, figsize=(25, 20), grid=False); def view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25,", "df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols, ipo_cols): ''' :param df: dataframe", "filtered_rows = df[(df[date_field] > row[date_field]) & (df[date_field] < row[date_field] + np.timedelta64(time_window, 'Y'))] filtered_rows", "def view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25, 60)) # i: index for i, col in", "[] for name, model in pipelines: kfold = KFold(n_splits=num_folds, random_state=seed) cv_results = cross_val_score(model,", "Homes 1 Year Before Date Filed', 'Zipcode for Distance', 'Number of Employees_weighted'] #view_residual_feature_plots(df_train,", "df_x, label_divider): df_x[\"Pred House Price ET\"] = predictions[0] df_x[\"Pred House Price RF\"] =", "Less Than $24,999', 'Distance to IPO_weighted'] quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number of Employees_weighted']", "African American', 'Percent of People who are Asian', 'Unemployment Rate', 'Mean Travel Time", "import Pipeline import statsmodels.formula.api as sm from datetime import datetime def load_processed_ipo_data(datafile, drop_nan_columns,", "[1], regex=True) filtered_rows['Number of 
Employees'] = pd.to_numeric(filtered_rows['Number of Employees']) for i in range(0,", "\"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std()) print(msg) def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed):", "time differential from beginning of window to the end :return: ''' encoded_data =", "data :param date_field: field that will be used to create time windows :param", "print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) # prepare the model", "model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) return [model, model_rf,", "results.append(cv_results) names.append(name) msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std()) print(msg) def", "i, col in enumerate(feature_list): # 3 plots here hence 1, 3 plt.subplot(10, 6,", "time window used for encoding and prediction. Likely 2 years. 
Decisions: weighted average", "import pandas as pd import numpy as np import sklearn import matplotlib.pyplot as", "n_estimators=100) model_rf.fit(df_train_x, df_train_y) # transform the validation dataset predictions_rf = model_rf.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_rf))", "from sklearn.model_selection import KFold, cross_val_score, GridSearchCV from sklearn.pipeline import Pipeline import statsmodels.formula.api as", "of People 65 years and over', 'Percent of Males', 'Percent of Females', 'Percent", "filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows = filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number of Employees'] =", "Past 12 months has been Below Poverty Level', 'Percent of Households With Income", "Square error method seed = 7 RMS = 'neg_mean_squared_error' pipelines = [] for", "plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs. residuals plot\", fontsize=18) plt.grid(True) #plt.show() def run_k_folds(num_folds, algs_to_test, df_train_x,", "error method seed = 7 RMS = 'neg_mean_squared_error' pipelines = [] for i", "of Employees', 'Found', 'Distance to IPO'] drop_columns = ['Unnamed: 0', 'CIK', 'Company Name']", "df.iterrows(): dict = row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field] > row[date_field]) & (df[date_field] < row[date_field]", "df.select_dtypes(include=[np.number]) train_corr = train_corr.drop(columns=drop_columns) train_corr.shape # Correlation plot corr = train_corr.corr() plt.subplots(figsize=(20, 9))", "Price GB Change\"] = predictions[2] /df_x[label_divider] - 1 return df_x def main_build_predictions(): ipo_final_with_date_filed_home", "Females', 'Percent of Households With Income Less Than $24,999', 'Percent of Households with", "100) return [model, model_rf, model_gb] def make_predictions_model(models, df_test_x): # prepare the model predictions", "windows :param location_field: field 
that denotes the zipcode demographic and economic data. Within", "after the Date is Filed and 2 years after the date is filed.", "model model = ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x, df_train_y) # transform the validation dataset predictions", "train_corr.shape # Correlation plot corr = train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True) plt.show() top_feature", "load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number of Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company Name']) min_max_normalization_list", "model.fit(df_train_x, df_train_y) # transform the validation dataset predictions = model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y,", "for Distance'] ipo_cols = ['Offer Amount', 'Number of Employees', 'Found', 'Distance to IPO']", "Before Date Filed', 'Zipcode for Distance', 'Number of Employees_weighted'] #view_residual_feature_plots(df_train, 'All Homes 2", "= [] for name, model in pipelines: kfold = KFold(n_splits=num_folds, random_state=seed) cv_results =", "Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company Name']) min_max_normalization_list = ['Found_weighted', 'Median Age', 'Percent of", "df_x[\"Pred House Price RF Change\"] = predictions[1] / df_x[label_divider] - 1 df_x[\"Pred House", "Filed' df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()] return df_train_set_2_years, df_test_set_2_years def create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols,", "return X_train, X_validation, Y_train, Y_validation, x_pred_test def plot_single_variable_distribution_and_prob_plot(df, attr): plt.subplots(figsize=(10, 9)) sns.distplot(df[attr], fit=stats.norm)", "encoded_data = [] df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d') for index, row in df.iterrows(): dict", "cross_val_score, GridSearchCV from sklearn.pipeline import Pipeline 
import statsmodels.formula.api as sm from datetime import", "to IPO'] drop_columns = ['Unnamed: 0', 'CIK', 'Company Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All", "the model predictions = models[0].predict(df_test_x) predictions_rf = models[1].predict(df_test_x) predictions_gb = models[2].predict(df_test_x) return [predictions,", "RF\"] = predictions[1] df_x[\"Pred House Price GB\"] = predictions[2] df_x[\"Pred House Price ET", "df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data): feature_cols", "Capita Income Estimate (dollars)', 'Percent of Population with no Health Insurance Coverage', 'Percent", "df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years After Date Filed', '2 Year Home", "$200,000', 'Percent of Males', 'Percent of People 65 years and over', 'Percent of", "18 years of age', 'Percent of People 65 years and over', 'Percent of", "filtered_rows.index = filtered_rows.index.map(str) filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field]) filtered_rows[\"time_weight\"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y')) filtered_rows =", "= model.predict(df_validation_x) #print(predictions) #print(df_test_y) print(mean_squared_error(df_validation_y, predictions)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100)", "(minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Offer Amount_weighted', 'Per Capita Income", "build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed): # prepare the model model = ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x,", "10 mile radius from the IPO Zipcode, the demographics of each of those", "field that will be used to create time windows :param location_field: field 
that", "= ExtraTreesRegressor(random_state=seed, n_estimators=100) model.fit(df_train_x, df_train_y) # transform the validation dataset predictions = model.predict(df_validation_x)", "of Females', 'Percent of Households With Income Less Than $24,999', 'Percent of Households", "view_feature_distributions(df): # histograms df.hist(bins=25, figsize=(25, 20), grid=False); def view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25, 60))", "[predictions, predictions_rf, predictions_gb] def create_predictions(predictions, df_x, label_divider): df_x[\"Pred House Price ET\"] = predictions[0]", "model_gb] def make_predictions_model(models, df_test_x): # prepare the model predictions = models[0].predict(df_test_x) predictions_rf =", "def create_predictions(predictions, df_x, label_divider): df_x[\"Pred House Price ET\"] = predictions[0] df_x[\"Pred House Price", "are Asian', 'Percent of People who are Black or African American', 'Percent of", "df_test, label_attr, feature_list): # Split-out validation dataset X = df_train.loc[:, feature_list] y =", "plt.subplot(10, 6, i + 1) x = df[col] y = df[label_attr] plt.plot(x, y,", "Date Filed') df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test, 'All Homes 2", "print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) # prepare the model model_rf =", "'Percent of People 65 years and over', 'Percent of Males', 'Percent of Females',", "= train_corr.drop(columns=drop_columns) train_corr.shape # Correlation plot corr = train_corr.corr() plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True)", "it. 
weighted average is by time differential from beginning of window to the", "Change\"] = predictions[0] / df_x[label_divider] - 1 df_x[\"Pred House Price RF Change\"] =", "the home prices at the Date Filed Time, the Lockup Date, 1 Year", "'NaN' value in column 'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns) return ipo_final_df def normalize_ipo(df_ipo, min_max_list,", "'Percent of Males', 'Percent of Females', 'Percent of People who are Hispanic', 'Percent", "plt.ylabel('Frequency') # Probablity plot fig = plt.figure() stats.probplot(df[attr], plot=plt) plt.show() def run_ordinary_least_squares(df_x, df_y):", "print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years After Date", "''' encoded_data = [] df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d') for index, row in df.iterrows():", "normalize_ipo(df_ipo, min_max_list, quantile_scaler_list): scaler_min_max = MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal')", "transform the validation dataset predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy --> \", model.score(df_validation_x,", "quantile_scaler_list): scaler_min_max = MinMaxScaler() df_ipo[min_max_list] = scaler_min_max.fit_transform( df_ipo[min_max_list]) scaler_quantile = QuantileTransformer(output_distribution='normal') df_ipo[quantile_scaler_list] =", "months has been Below Poverty Level', 'Percent of Households With Income Less Than", "#view_residual_feature_plots(df_train, 'All Homes 2 Years After Date Filed', feature_cols) #plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years", "$200,000', 'Median Household Income Estimate (dollars)', 'Mean Household Income Estimate (dollars)', 'Per Capita", "sns.heatmap(top_corr, annot=True) 
plt.title('Correlation between features'); plt.show() def view_feature_distributions(df): # histograms df.hist(bins=25, figsize=(25, 20),", "encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove row where if there is any", "predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) return", "= pd.DataFrame(encoded_data) return ipo_final_ecoded_df def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold): train_corr = df.select_dtypes(include=[np.number]) train_corr =", "df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor] # dataset that does not have 'All Homes", "'Median Age', 'Percent of People under 18 years of age', 'Percent of People", "errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for Distance', 2, feature_cols, ipo_cols)", "df_ipo[label_attr] / df_ipo[ratio_divisor] # dataset that does not have 'All Homes 2 Years", "['Found_weighted', 'Median Age', 'Percent of People under 18 years of age', 'Percent of", "row.filter(feature_cols).to_dict() filtered_rows = df[(df[date_field] > row[date_field]) & (df[date_field] < row[date_field] + np.timedelta64(time_window, 'Y'))]", "# plot with the distribution plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu,", "zip code in a 10 mile radius from the IPO Zipcode, the demographics", "5)) p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k') xmin = min(results.fittedvalues) xmax = max(results.fittedvalues) plt.hlines(y=0,", "1.1, color='red', linestyle='--', lw=3) plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs. 
residuals plot\",", "ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) # transform the validation dataset predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y, predictions_gb))", "df_x def main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number of Employees_weighted'], ['Unnamed:", "normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2", "datetime def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns): ''' Import Final IPO csv that was created", "Year after the Date is Filed and 2 years after the date is", "= create_predictions(predictions, df_test_x, 'All Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def create_encoding_historical_zipcode_data(data): feature_cols =", "= [] df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d') for index, row in df.iterrows(): dict =", "dataset X = df_train.loc[:, feature_list] y = df_train[label_attr] x_pred_test = df_test.loc[:, feature_list] X_train,", "the date is filed. 
''' ipo_final_df = pd.read_csv(datafile, encoding=\"ISO-8859-1\") ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns)", "Filed', feature_cols) #run_ordinary_least_squares(df_train_x, df_train_y) #k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF',", "# i: index for i, col in enumerate(feature_list): # 3 plots here hence", "Rate', 'All Homes Date Filed','All Homes 1 Year Before Date Filed', 'All Homes", "= scaler_quantile.fit_transform(df_ipo[quantile_scaler_list]) df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list]) return df_ipo def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor): #", "American', 'Percent of People who are Asian', 'Unemployment Rate', 'Mean Travel Time to", "Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d') ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for", "ratio_divisor): # Predicting Median Price of All Homes in a Zipcode, and strucuturing", "ratio_label, ratio_divisor): # Predicting Median Price of All Homes in a Zipcode, and", "# 3 plots here hence 1, 3 plt.subplot(10, 6, i + 1) x", "make_predictions_model(models, df_test_x) df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All Homes Date Filed') df_test_x_with_pred.to_csv(\"../data/processed/Test_Predictions_encoded.csv\", index=False) def", "Level', 'Percent of Population with no Health Insurance Coverage', 'Unemployment Rate', 'All Homes", "sklearn import matplotlib.pyplot as plt import seaborn as sns from scipy import stats", "= ensemble.GradientBoostingRegressor(**params) model_gb.fit(df_train_x, df_train_y) # transform the validation dataset predictions_gb = model_gb.predict(df_validation_x) print(mean_squared_error(df_validation_y,", "Asian', 'Unemployment 
Rate', 'Mean Travel Time to Work Estimate (minutes)', 'Percent of Households", "from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor from sklearn.model_selection import KFold, cross_val_score, GridSearchCV", "parameters used by the function (mu, sigma) = stats.norm.fit(df[attr]) # plot with the", "and strucuturing data to do so. df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor] # dataset", "any 'NaN' value in column 'A' #ipo_final_df = ipo_final_df.drop(columns=drop_columns) return ipo_final_df def normalize_ipo(df_ipo,", "preprocessing from sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer from sklearn import ensemble, datasets, metrics", "'Percent of Households With Income Less Than $24,999', 'Percent of Households with Income", "of Employees_weighted'] ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list) print(ipo_final_with_date_filed_home.isnull().sum(axis = 0)) df_train, df_test =", "Employees'] = pd.to_numeric(filtered_rows['Number of Employees']) for i in range(0, len(ipo_cols)): dict[ipo_cols[i] + '_weighted']", "Homes Date Filed','All Homes 1 Year Before Date Filed', 'All Homes 2 Years", "Household Income Estimate (dollars)', 'Mean Household Income Estimate (dollars)', 'Per Capita Income Estimate", "df_y): model = sm.OLS(df_y, df_x) results = model.fit() print(results.summary()) plt.figure(figsize=(8, 5)) p =", "row[date_field]) & (df[date_field] < row[date_field] + np.timedelta64(time_window, 'Y'))] filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]]", "(dollars)', 'Percent of Females', 'Percent of Households With Income Less Than $24,999', 'Percent", "Estimate (dollars)', 'Mean Travel Time to Work Estimate (minutes)', 'Median Age', 'Median Household", "predictions_rf = model_rf.predict(df_validation_x) 
print(mean_squared_error(df_validation_y, predictions_rf)) print(\"Accuracy --> \", model.score(df_validation_x, df_validation_y) * 100) params", "for i, col in enumerate(feature_list): # 3 plots here hence 1, 3 plt.subplot(10,", "plot\", fontsize=18) plt.grid(True) #plt.show() def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y): # Test options and", "the distribution plt.legend(['Normal dist. ($\\mu=$ {:.2f} and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency')", "10 miles of IPO :param time_window: time window used for encoding and prediction.", "and $\\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') # Probablity plot fig = plt.figure()", "plt.subplots(figsize=(20, 9)) sns.heatmap(corr, annot=True) plt.show() top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)] plt.subplots(figsize=(12, 8)) top_corr", "figsize=(25, 20), grid=False); def view_residual_feature_plots(df, label_attr, feature_list): plt.figure(figsize=(25, 60)) # i: index for", "the model because it does have 'All Homes 2 Years After Date Filed'", "matplotlib.pyplot as plt import seaborn as sns from scipy import stats from sklearn", "2 Years After Date Filed', 'Date Filed', 'Zipcode for Distance'] ipo_cols = ['Offer", "filtered_rows.replace(['--'], [1], regex=True) filtered_rows['Number of Employees'] = pd.to_numeric(filtered_rows['Number of Employees']) for i in", "sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer from sklearn import ensemble,", "Valley, and each zip code in a 10 mile radius from the IPO", "do so. 
df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor] # dataset that does not have", "sklearn.pipeline import Pipeline import statsmodels.formula.api as sm from datetime import datetime def load_processed_ipo_data(datafile,", "Pipeline import statsmodels.formula.api as sm from datetime import datetime def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns):", "plt.xlabel(\"Fitted values\", fontsize=15) plt.ylabel(\"Residuals\", fontsize=15) plt.title(\"Fitted vs. residuals plot\", fontsize=18) plt.grid(True) #plt.show() def", "Household Income Estimate (dollars)', 'Offer Amount_weighted', 'Per Capita Income Estimate (dollars)', 'Percent of", "Estimate (minutes)', 'Median Age', 'Median Household Income Estimate (dollars)', 'Per Capita Income Estimate", "Poverty Level', 'Percent of Population with no Health Insurance Coverage', 'Unemployment Rate', 'All", "IPO'] drop_columns = ['Unnamed: 0', 'CIK', 'Company Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes", "weighted average of encoded historical data --> either I can define it or", "Income Estimate (dollars)', 'Per Capita Income Estimate (dollars)', 'Percent of Population with no", "def prep_train_validation_test_data(df_train, df_test, label_attr, feature_list): # Split-out validation dataset X = df_train.loc[:, feature_list]", "main_build_predictions(): ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number of Employees_weighted'], ['Unnamed: 0', 'CIK',", "1 Year Before Date Filed', 'Zipcode for Distance', 'Number of Employees_weighted'] #view_residual_feature_plots(df_train, 'All", "Filed Time, the Lockup Date, 1 Year after the Date is Filed and", "['Unnamed: 0', 'CIK', 'Company Name'] ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes Date Filed','Number of", "65 years and over', 'Percent of Males', 'Percent of Females', 'Percent of People", "100) # 
prepare the model model_rf = RandomForestRegressor(random_state=seed, n_estimators=100) model_rf.fit(df_train_x, df_train_y) # transform", "1) x = df[col] y = df[label_attr] plt.plot(x, y, 'o') # Create regression", "predictions[0] / df_x[label_divider] - 1 df_x[\"Pred House Price RF Change\"] = predictions[1] /", "is by time differential from beginning of window to the end :return: '''" ]
[ "= last_players return response def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length =", "= json.loads(body.decode()) global last_players last_players = request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global last_update last_update", "create_response(self): response = {} if last_update is None: response['status'] = 'Error' else: response['last_update']", "class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self): response", "content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) request = json.loads(body.decode()) global last_players last_players =", "from datetime import datetime import json import ssl last_update = None last_players =", "datetime import json import ssl last_update = None last_players = None class HTTPRequestHandler(BaseHTTPRequestHandler):", "= None class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def", "= 'Error' else: response['last_update'] = last_update.isoformat() if last_players is not None: response['players'] =", "None last_players = None class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json')", "last_update last_update = datetime.now() httpd = HTTPServer(('', 30001), HTTPRequestHandler) httpd.socket = ssl.wrap_socket (httpd.socket,", "= last_update.isoformat() if last_players is not None: response['players'] = last_players return response def", "def end_headers (self): 
self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self): response = {}", "request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global last_update last_update = datetime.now() httpd = HTTPServer(('', 30001),", "body = self.rfile.read(content_length) request = json.loads(body.decode()) global last_players last_players = request['players'] self.send_response(200) self.end_headers()", "= self.rfile.read(content_length) request = json.loads(body.decode()) global last_players last_players = request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode())", "last_update = datetime.now() httpd = HTTPServer(('', 30001), HTTPRequestHandler) httpd.socket = ssl.wrap_socket (httpd.socket, keyfile=\"/etc/letsencrypt/live/minetest.westeurope.cloudapp.azure.com/privkey.pem\",", "not None: response['players'] = last_players return response def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def", "self.end_headers() self.wfile.write('{}'.encode()) global last_update last_update = datetime.now() httpd = HTTPServer(('', 30001), HTTPRequestHandler) httpd.socket", "response['last_update'] = last_update.isoformat() if last_players is not None: response['players'] = last_players return response", "http.server import HTTPServer, BaseHTTPRequestHandler from datetime import datetime import json import ssl last_update", "None: response['players'] = last_players return response def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self):", "= request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global last_update last_update = datetime.now() httpd = HTTPServer(('',", "return response def do_GET(self): 
self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length = int(self.headers['Content-Length']) body", "do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) request", "response['players'] = last_players return response def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length", "global last_update last_update = datetime.now() httpd = HTTPServer(('', 30001), HTTPRequestHandler) httpd.socket = ssl.wrap_socket", "last_players = request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global last_update last_update = datetime.now() httpd =", "HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self): response =", "last_players is not None: response['players'] = last_players return response def do_GET(self): self.send_response(200) self.end_headers()", "import ssl last_update = None last_players = None class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self):", "if last_update is None: response['status'] = 'Error' else: response['last_update'] = last_update.isoformat() if last_players", "json import ssl last_update = None last_players = None class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers", "response['status'] = 'Error' else: response['last_update'] = last_update.isoformat() if last_players is not None: response['players']", "'Error' else: response['last_update'] = last_update.isoformat() if last_players is not None: response['players'] = last_players", "import datetime 
import json import ssl last_update = None last_players = None class", "= int(self.headers['Content-Length']) body = self.rfile.read(content_length) request = json.loads(body.decode()) global last_players last_players = request['players']", "self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) request = json.loads(body.decode()) global", "self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global last_update last_update = datetime.now() httpd = HTTPServer(('', 30001), HTTPRequestHandler)", "request = json.loads(body.decode()) global last_players last_players = request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global last_update", "HTTPServer, BaseHTTPRequestHandler from datetime import datetime import json import ssl last_update = None", "self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) request = json.loads(body.decode())", "def do_POST(self): content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) request = json.loads(body.decode()) global last_players", "self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self): response = {} if last_update is None: response['status']", "is not None: response['players'] = last_players return response def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode())", "int(self.headers['Content-Length']) body = self.rfile.read(content_length) request = json.loads(body.decode()) global last_players last_players = request['players'] self.send_response(200)", "BaseHTTPRequestHandler.end_headers(self) def create_response(self): response = {} if last_update is 
None: response['status'] = 'Error'", "httpd = HTTPServer(('', 30001), HTTPRequestHandler) httpd.socket = ssl.wrap_socket (httpd.socket, keyfile=\"/etc/letsencrypt/live/minetest.westeurope.cloudapp.azure.com/privkey.pem\", certfile='/etc/letsencrypt/live/minetest.westeurope.cloudapp.azure.com/fullchain.pem', server_side=True) httpd.serve_forever()", "'*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self): response = {} if last_update is None:", "BaseHTTPRequestHandler from datetime import datetime import json import ssl last_update = None last_players", "end_headers (self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self): response = {} if", "self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self): response = {} if last_update is", "last_update is None: response['status'] = 'Error' else: response['last_update'] = last_update.isoformat() if last_players is", "last_update.isoformat() if last_players is not None: response['players'] = last_players return response def do_GET(self):", "= None last_players = None class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type',", "import HTTPServer, BaseHTTPRequestHandler from datetime import datetime import json import ssl last_update =", "last_players last_players = request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global last_update last_update = datetime.now() httpd", "None: response['status'] = 'Error' else: response['last_update'] = last_update.isoformat() if last_players is not None:", "self.wfile.write('{}'.encode()) global last_update last_update = datetime.now() httpd = 
HTTPServer(('', 30001), HTTPRequestHandler) httpd.socket =", "ssl last_update = None last_players = None class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self): self.send_header('Access-Control-Allow-Origin',", "json.loads(body.decode()) global last_players last_players = request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global last_update last_update =", "(self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self): response = {} if last_update", "None class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self):", "datetime import datetime import json import ssl last_update = None last_players = None", "do_POST(self): content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) request = json.loads(body.decode()) global last_players last_players", "if last_players is not None: response['players'] = last_players return response def do_GET(self): self.send_response(200)", "import json import ssl last_update = None last_players = None class HTTPRequestHandler(BaseHTTPRequestHandler): def", "'application/json') BaseHTTPRequestHandler.end_headers(self) def create_response(self): response = {} if last_update is None: response['status'] =", "response = {} if last_update is None: response['status'] = 'Error' else: response['last_update'] =", "self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) request =", "def do_GET(self): self.send_response(200) self.end_headers() 
self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length)", "def create_response(self): response = {} if last_update is None: response['status'] = 'Error' else:", "self.rfile.read(content_length) request = json.loads(body.decode()) global last_players last_players = request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global", "from http.server import HTTPServer, BaseHTTPRequestHandler from datetime import datetime import json import ssl", "else: response['last_update'] = last_update.isoformat() if last_players is not None: response['players'] = last_players return", "global last_players last_players = request['players'] self.send_response(200) self.end_headers() self.wfile.write('{}'.encode()) global last_update last_update = datetime.now()", "= {} if last_update is None: response['status'] = 'Error' else: response['last_update'] = last_update.isoformat()", "last_players return response def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length = int(self.headers['Content-Length'])", "last_update = None last_players = None class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self): self.send_header('Access-Control-Allow-Origin', '*')", "response def do_GET(self): self.send_response(200) self.end_headers() self.wfile.write(json.dumps(self.create_response()).encode()) def do_POST(self): content_length = int(self.headers['Content-Length']) body =", "last_players = None class HTTPRequestHandler(BaseHTTPRequestHandler): def end_headers (self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Content-Type', 'application/json') BaseHTTPRequestHandler.end_headers(self)", "is None: response['status'] = 'Error' else: response['last_update'] = last_update.isoformat() if last_players is 
not", "= datetime.now() httpd = HTTPServer(('', 30001), HTTPRequestHandler) httpd.socket = ssl.wrap_socket (httpd.socket, keyfile=\"/etc/letsencrypt/live/minetest.westeurope.cloudapp.azure.com/privkey.pem\", certfile='/etc/letsencrypt/live/minetest.westeurope.cloudapp.azure.com/fullchain.pem',", "<reponame>filhit/dsoulstest-server from http.server import HTTPServer, BaseHTTPRequestHandler from datetime import datetime import json import", "datetime.now() httpd = HTTPServer(('', 30001), HTTPRequestHandler) httpd.socket = ssl.wrap_socket (httpd.socket, keyfile=\"/etc/letsencrypt/live/minetest.westeurope.cloudapp.azure.com/privkey.pem\", certfile='/etc/letsencrypt/live/minetest.westeurope.cloudapp.azure.com/fullchain.pem', server_side=True)", "{} if last_update is None: response['status'] = 'Error' else: response['last_update'] = last_update.isoformat() if" ]
[ "run tag @a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute as @r[team=TEAM_{0}, tag=InGame] run scoreboard players add", "tag=InGame] run tag @a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute as @r[team=TEAM_{0}, tag=InGame] run scoreboard players", "in range(1, 22): print(\"execute as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute", "script\") for i in range(1, 22): print(\"execute as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}]", "add Winner\".format(i)) print(\"execute as @r[team=TEAM_{0}, tag=InGame] run scoreboard players add @a[team=TEAM_{0}] Wins 1\".format(i))", "Generated by python script\") for i in range(1, 22): print(\"execute as @r[team=TEAM_{0}, tag=InGame]", "print(\"execute as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute as @r[team=TEAM_{0}, tag=InGame]", "Winner\".format(i)) print(\"execute as @r[team=TEAM_{0}, tag=InGame] run scoreboard players add @a[team=TEAM_{0}] Wins 1\".format(i)) main()", "range(1, 22): print(\"execute as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute as", "python script\") for i in range(1, 22): print(\"execute as @r[team=TEAM_{0}, tag=InGame] run tag", "by python script\") for i in range(1, 22): print(\"execute as @r[team=TEAM_{0}, tag=InGame] run", "22): print(\"execute as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute as @r[team=TEAM_{0},", "as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute as @r[team=TEAM_{0}, tag=InGame] run", "@a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute as @r[team=TEAM_{0}, tag=InGame] run scoreboard players add @a[team=TEAM_{0}] Wins", "print(\"# Generated by python script\") for i in range(1, 22): print(\"execute as @r[team=TEAM_{0},", "<reponame>Geoffry-Skionfinschii/Datapack_SurvivalGames def main(): print(\"# 
Generated by python script\") for i in range(1, 22):", "main(): print(\"# Generated by python script\") for i in range(1, 22): print(\"execute as", "@r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute as @r[team=TEAM_{0}, tag=InGame] run scoreboard", "tag @a[team=TEAM_{0}] add Winner\".format(i)) print(\"execute as @r[team=TEAM_{0}, tag=InGame] run scoreboard players add @a[team=TEAM_{0}]", "def main(): print(\"# Generated by python script\") for i in range(1, 22): print(\"execute", "for i in range(1, 22): print(\"execute as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add", "i in range(1, 22): print(\"execute as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add Winner\".format(i))" ]
[ "url = url_pattern.match(self.data).group(1) if url.endswith('/'): url += 'index.html' if url == '/deep': #", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get() self.respond() def respond(self): response = 'HTTP/1.1 '", "this will keep running until you # interrupt the program with Ctrl-C server.serve_forever()", "non-GET requests yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get() self.respond() def respond(self): response", "if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True #", "governing permissions and # limitations under the License. # # # Furthermore it", "print('server doesnt serve non-GET requests yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get() self.respond()", "os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() print (\"Got a request of:", "only handling get for now print('server doesnt serve non-GET requests yet') self.status =", "__name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create", "'\\r\\nContent-Type: ' + self.mimetype if self.content: response += \"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8')) def", "this file except in compliance with the License. 
# You may obtain a", "server, binding to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) #", "HTTPStatus import re import os CWD = os.getcwd() HOME = os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler):", "# http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl -v -X GET", "% self.data) self.data = self.data.decode('utf-8') self.status = HTTPStatus.OK self.mimetype = mimetypes.types_map['.a'] #unknown type", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "PORT), MyWebServer) # Activate the server; this will keep running until you #", "ANY KIND, either express or implied. # See the License for the specific", "def respond(self): response = 'HTTP/1.1 ' + str(self.status.value) + ' ' + self.status.phrase", "coding: utf-8 import socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed under", "open(HOME+url) self.content = f.read() f.close() if url.endswith('.html'): self.mimetype = mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype", "+= 'index.html' if url == '/deep': # 301 TODO is this hardcoding? If", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "run: python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ import mimetypes from", "# # run: python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ import", "HTTPStatus.MOVED_PERMANENTLY return if not os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1:", "mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype = mimetypes.types_map['.css'] if __name__ == \"__main__\": HOST, PORT =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will", "= socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will keep running until", "print (\"Got a request of: %s\\n\" % self.data) self.data = self.data.decode('utf-8') self.status =", "HTTPStatus.OK self.mimetype = mimetypes.types_map['.a'] #unknown type is application/octet-stream self.content = None if not", "reference: extracting URL from HTTP request via a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern =", "== '/deep': # 301 TODO is this hardcoding? If yes, whats the correct", "language governing permissions and # limitations under the License. # # # Furthermore", "= 'HTTP/1.1 ' + str(self.status.value) + ' ' + self.status.phrase response += '\\r\\nContent-Type:", "OF ANY KIND, either express or implied. # See the License for the", "HOME = os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() print (\"Got a", "of the code is Copyright © 2001-2013 Python Software # Foundation; All Rights", "'index.html' if url == '/deep': # 301 TODO is this hardcoding? If yes,", "# 301 TODO is this hardcoding? If yes, whats the correct way? 
self.status", "Activate the server; this will keep running until you # interrupt the program", "request via a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET (.*)[ ].*\") url =", "permissions and # limitations under the License. # # # Furthermore it is", "\"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8')) def handle_get(self): # reference: extracting URL from HTTP request", "https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET (.*)[ ].*\") url = url_pattern.match(self.data).group(1) if url.endswith('/'): url +=", "= mimetypes.types_map['.css'] if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address =", "yes, whats the correct way? self.status = HTTPStatus.MOVED_PERMANENTLY return if not os.path.isfile(HOME+url): self.status", "' + str(self.status.value) + ' ' + self.status.phrase response += '\\r\\nContent-Type: ' +", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET (.*)[ ].*\") url = url_pattern.match(self.data).group(1) if url.endswith('/'): url", "self.content: response += \"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8')) def handle_get(self): # reference: extracting URL", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "http import HTTPStatus import re import os CWD = os.getcwd() HOME = os.path.abspath('./www')", "way? 
self.status = HTTPStatus.MOVED_PERMANENTLY return if not os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND return if", "if self.content: response += \"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8')) def handle_get(self): # reference: extracting", "is derived from the Python documentation examples thus # some of the code", "'/deep': # 301 TODO is this hardcoding? If yes, whats the correct way?", "on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "path self.status = HTTPStatus.NOT_FOUND return f = open(HOME+url) self.content = f.read() f.close() if", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "mimetypes.types_map['.css'] if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True", "response = 'HTTP/1.1 ' + str(self.status.value) + ' ' + self.status.phrase response +=", "self.data.startswith('GET'): # only handling get for now print('server doesnt serve non-GET requests yet')", "HTTPStatus.NOT_FOUND return f = open(HOME+url) self.content = f.read() f.close() if url.endswith('.html'): self.mimetype =", "= mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype = mimetypes.types_map['.css'] if __name__ == \"__main__\": HOST, PORT", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "if not self.data.startswith('GET'): # only handling get for now print('server doesnt serve non-GET", "= HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get() self.respond() def respond(self): response = 'HTTP/1.1 ' +", "return if not os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: #", "http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl -v -X 
GET http://127.0.0.1:8080/", "required by applicable law or agreed to in writing, software # distributed under", "os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: # /www/ should be in the current path self.status =", "requests yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get() self.respond() def respond(self): response =", "# try: curl -v -X GET http://127.0.0.1:8080/ import mimetypes from http import HTTPStatus", "applicable law or agreed to in writing, software # distributed under the License", "or agreed to in writing, software # distributed under the License is distributed", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "+ self.mimetype if self.content: response += \"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8')) def handle_get(self): #", "self.status = HTTPStatus.OK self.mimetype = mimetypes.types_map['.a'] #unknown type is application/octet-stream self.content = None", "== -1: # /www/ should be in the current path self.status = HTTPStatus.NOT_FOUND", "self.data) self.data = self.data.decode('utf-8') self.status = HTTPStatus.OK self.mimetype = mimetypes.types_map['.a'] #unknown type is", "url_pattern = re.compile(\"^GET (.*)[ ].*\") url = url_pattern.match(self.data).group(1) if url.endswith('/'): url += 'index.html'", "(.*)[ ].*\") url = url_pattern.match(self.data).group(1) if url.endswith('/'): url += 'index.html' if url ==", "-v -X GET http://127.0.0.1:8080/ import mimetypes from http import HTTPStatus import re import", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "thus # some of the code is Copyright © 2001-2013 Python Software #", "writing, software # distributed under the License is distributed on an \"AS IS\"", "be in the current path self.status = HTTPStatus.NOT_FOUND return f = open(HOME+url) self.content", "socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to localhost on port 8080", "if not 
os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: # /www/", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "utf-8 import socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed under the", "True # Create the server, binding to localhost on port 8080 server =", "License. # You may obtain a copy of the License at # #", "python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ import mimetypes from http", "now print('server doesnt serve non-GET requests yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get()", "import HTTPStatus import re import os CWD = os.getcwd() HOME = os.path.abspath('./www') class", "Copyright 2013 <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0", "compliance with the License. # You may obtain a copy of the License", "serve non-GET requests yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get() self.respond() def respond(self):", "-X GET http://127.0.0.1:8080/ import mimetypes from http import HTTPStatus import re import os", "whats the correct way? 
self.status = HTTPStatus.MOVED_PERMANENTLY return if not os.path.isfile(HOME+url): self.status =", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "handling get for now print('server doesnt serve non-GET requests yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED", "response += \"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8')) def handle_get(self): # reference: extracting URL from", "should be in the current path self.status = HTTPStatus.NOT_FOUND return f = open(HOME+url)", "import mimetypes from http import HTTPStatus import re import os CWD = os.getcwd()", "self.status = HTTPStatus.NOT_FOUND return f = open(HOME+url) self.content = f.read() f.close() if url.endswith('.html'):", "not use this file except in compliance with the License. # You may", "type is application/octet-stream self.content = None if not self.data.startswith('GET'): # only handling get", "def handle(self): self.data = self.request.recv(1024).strip() print (\"Got a request of: %s\\n\" % self.data)", "].*\") url = url_pattern.match(self.data).group(1) if url.endswith('/'): url += 'index.html' if url == '/deep':", "MyWebServer) # Activate the server; this will keep running until you # interrupt", "8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to localhost on port", "License, Version 2.0 (the \"License\"); # you may not use this file except", "f.close() if url.endswith('.html'): self.mimetype = mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype = mimetypes.types_map['.css'] if __name__", "import os CWD = os.getcwd() HOME = os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data", "os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: # /www/ should be", "self.data = self.data.decode('utf-8') self.status = HTTPStatus.OK self.mimetype = 
mimetypes.types_map['.a'] #unknown type is application/octet-stream", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "code is Copyright © 2001-2013 Python Software # Foundation; All Rights Reserved #", "None if not self.data.startswith('GET'): # only handling get for now print('server doesnt serve", "Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl -v", "correct way? self.status = HTTPStatus.MOVED_PERMANENTLY return if not os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND return", "# you may not use this file except in compliance with the License.", "the License. # # # Furthermore it is derived from the Python documentation", "mimetypes.types_map['.a'] #unknown type is application/octet-stream self.content = None if not self.data.startswith('GET'): # only", "the server, binding to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer)", "self.respond() return self.handle_get() self.respond() def respond(self): response = 'HTTP/1.1 ' + str(self.status.value) +", "agreed to in writing, software # distributed under the License is distributed on", "# # # Furthermore it is derived from the Python documentation examples thus", "some of the code is Copyright © 2001-2013 Python Software # Foundation; All", "regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET (.*)[ ].*\") url = url_pattern.match(self.data).group(1) if url.endswith('/'):", "= \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to localhost", "= url_pattern.match(self.data).group(1) if url.endswith('/'): url += 'index.html' if url == '/deep': # 301", "(the \"License\"); # you may not use this file except in compliance with", "HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get() self.respond() def respond(self): response = 'HTTP/1.1 ' + 
str(self.status.value)", "url.endswith('.html'): self.mimetype = mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype = mimetypes.types_map['.css'] if __name__ == \"__main__\":", "# limitations under the License. # # # Furthermore it is derived from", "if url.endswith('.html'): self.mimetype = mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype = mimetypes.types_map['.css'] if __name__ ==", "# Unless required by applicable law or agreed to in writing, software #", "a request of: %s\\n\" % self.data) self.data = self.data.decode('utf-8') self.status = HTTPStatus.OK self.mimetype", "by applicable law or agreed to in writing, software # distributed under the", "# # Furthermore it is derived from the Python documentation examples thus #", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to", "self.status = HTTPStatus.MOVED_PERMANENTLY return if not os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\")", "Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python", "Furthermore it is derived from the Python documentation examples thus # some of", "Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py #", "URL from HTTP request via a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET (.*)[", "<NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "= os.getcwd() HOME = os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() print", "file except in compliance with the License. 
# You may obtain a copy", "# some of the code is Copyright © 2001-2013 Python Software # Foundation;", "+ self.status.phrase response += '\\r\\nContent-Type: ' + self.mimetype if self.content: response += \"\\n\\n\"", "' + self.mimetype if self.content: response += \"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8')) def handle_get(self):", "License for the specific language governing permissions and # limitations under the License.", "(\"Got a request of: %s\\n\" % self.data) self.data = self.data.decode('utf-8') self.status = HTTPStatus.OK", "socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will keep running until you", "to in writing, software # distributed under the License is distributed on an", "curl -v -X GET http://127.0.0.1:8080/ import mimetypes from http import HTTPStatus import re", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "current path self.status = HTTPStatus.NOT_FOUND return f = open(HOME+url) self.content = f.read() f.close()", "url == '/deep': # 301 TODO is this hardcoding? If yes, whats the", "a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET (.*)[ ].*\") url = url_pattern.match(self.data).group(1) if", "TODO is this hardcoding? If yes, whats the correct way? self.status = HTTPStatus.MOVED_PERMANENTLY", "from HTTP request via a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET (.*)[ ].*\")", "return f = open(HOME+url) self.content = f.read() f.close() if url.endswith('.html'): self.mimetype = mimetypes.types_map['.html']", "or implied. 
# See the License for the specific language governing permissions and", "str(self.status.value) + ' ' + self.status.phrase response += '\\r\\nContent-Type: ' + self.mimetype if", "documentation examples thus # some of the code is Copyright © 2001-2013 Python", "url_pattern.match(self.data).group(1) if url.endswith('/'): url += 'index.html' if url == '/deep': # 301 TODO", "is this hardcoding? If yes, whats the correct way? self.status = HTTPStatus.MOVED_PERMANENTLY return", "= HTTPStatus.NOT_FOUND return f = open(HOME+url) self.content = f.read() f.close() if url.endswith('.html'): self.mimetype", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "is application/octet-stream self.content = None if not self.data.startswith('GET'): # only handling get for", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server;", "the current path self.status = HTTPStatus.NOT_FOUND return f = open(HOME+url) self.content = f.read()", "self.respond() def respond(self): response = 'HTTP/1.1 ' + str(self.status.value) + ' ' +", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() print (\"Got a request of: %s\\n\"", "re.compile(\"^GET (.*)[ ].*\") url = url_pattern.match(self.data).group(1) if url.endswith('/'): url += 'index.html' if url", "f = open(HOME+url) self.content = f.read() f.close() if url.endswith('.html'): self.mimetype = mimetypes.types_map['.html'] elif", "import socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed under the Apache", "# See the License for the specific language 
governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "this hardcoding? If yes, whats the correct way? self.status = HTTPStatus.MOVED_PERMANENTLY return if", "/www/ should be in the current path self.status = HTTPStatus.NOT_FOUND return f =", "self.handle_get() self.respond() def respond(self): response = 'HTTP/1.1 ' + str(self.status.value) + ' '", "self.status.phrase response += '\\r\\nContent-Type: ' + self.mimetype if self.content: response += \"\\n\\n\" +", "© 2001-2013 Python Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html #", "os CWD = os.getcwd() HOME = os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data =", "= None if not self.data.startswith('GET'): # only handling get for now print('server doesnt", "+ ' ' + self.status.phrase response += '\\r\\nContent-Type: ' + self.mimetype if self.content:", "f.read() f.close() if url.endswith('.html'): self.mimetype = mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype = mimetypes.types_map['.css'] if", "hardcoding? If yes, whats the correct way? self.status = HTTPStatus.MOVED_PERMANENTLY return if not", "Python Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run:", "' + self.status.phrase response += '\\r\\nContent-Type: ' + self.mimetype if self.content: response +=", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. 
#", "freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ import mimetypes from http import", "= f.read() f.close() if url.endswith('.html'): self.mimetype = mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype = mimetypes.types_map['.css']", "request of: %s\\n\" % self.data) self.data = self.data.decode('utf-8') self.status = HTTPStatus.OK self.mimetype =", "url += 'index.html' if url == '/deep': # 301 TODO is this hardcoding?", "self.data.decode('utf-8') self.status = HTTPStatus.OK self.mimetype = mimetypes.types_map['.a'] #unknown type is application/octet-stream self.content =", "re import os CWD = os.getcwd() HOME = os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def handle(self):", "not self.data.startswith('GET'): # only handling get for now print('server doesnt serve non-GET requests", "self.mimetype = mimetypes.types_map['.css'] if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address", "use this file except in compliance with the License. 
# You may obtain", "self.mimetype = mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype = mimetypes.types_map['.css'] if __name__ == \"__main__\": HOST,", "8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will keep", "it is derived from the Python documentation examples thus # some of the", "is Copyright © 2001-2013 Python Software # Foundation; All Rights Reserved # #", "mimetypes from http import HTTPStatus import re import os CWD = os.getcwd() HOME", "self.data = self.request.recv(1024).strip() print (\"Got a request of: %s\\n\" % self.data) self.data =", "# coding: utf-8 import socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed", "derived from the Python documentation examples thus # some of the code is", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "2001-2013 Python Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # #", "http://127.0.0.1:8080/ import mimetypes from http import HTTPStatus import re import os CWD =", "if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: # /www/ should be in the current path self.status", "# run: python freetests.py # try: curl -v -X GET http://127.0.0.1:8080/ import mimetypes", "If yes, whats the correct way? 
self.status = HTTPStatus.MOVED_PERMANENTLY return if not os.path.isfile(HOME+url):", "' ' + self.status.phrase response += '\\r\\nContent-Type: ' + self.mimetype if self.content: response", "HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding", "2.0 (the \"License\"); # you may not use this file except in compliance", "handle(self): self.data = self.request.recv(1024).strip() print (\"Got a request of: %s\\n\" % self.data) self.data", "url.endswith('.css'): self.mimetype = mimetypes.types_map['.css'] if __name__ == \"__main__\": HOST, PORT = \"localhost\", 8080", "Create the server, binding to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT),", "os.getcwd() HOME = os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() print (\"Got", "for the specific language governing permissions and # limitations under the License. #", "= mimetypes.types_map['.a'] #unknown type is application/octet-stream self.content = None if not self.data.startswith('GET'): #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "from http import HTTPStatus import re import os CWD = os.getcwd() HOME =", "-1: # /www/ should be in the current path self.status = HTTPStatus.NOT_FOUND return", "# # Unless required by applicable law or agreed to in writing, software", "via a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET (.*)[ ].*\") url = url_pattern.match(self.data).group(1)", "#unknown type is application/octet-stream self.content = None if not self.data.startswith('GET'): # only handling", "express or implied. 
# See the License for the specific language governing permissions", "= HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: # /www/ should be in the", "MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() print (\"Got a request of: %s\\n\" %", "either express or implied. # See the License for the specific language governing", "= os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip() print (\"Got a request", "under the License. # # # Furthermore it is derived from the Python", "= HTTPStatus.OK self.mimetype = mimetypes.types_map['.a'] #unknown type is application/octet-stream self.content = None if", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "examples thus # some of the code is Copyright © 2001-2013 Python Software", "the Python documentation examples thus # some of the code is Copyright ©", "doesnt serve non-GET requests yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get() self.respond() def", "= open(HOME+url) self.content = f.read() f.close() if url.endswith('.html'): self.mimetype = mimetypes.types_map['.html'] elif url.endswith('.css'):", "the License. 
# You may obtain a copy of the License at #", "extracting URL from HTTP request via a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "= HTTPStatus.MOVED_PERMANENTLY return if not os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\") ==", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "get for now print('server doesnt serve non-GET requests yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond()", "# Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py", "All Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try:", "+= '\\r\\nContent-Type: ' + self.mimetype if self.content: response += \"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8'))", "return if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: # /www/ should be in the current path", "# Copyright 2013 <NAME>, <NAME> # # Licensed under the Apache License, Version", "= self.request.recv(1024).strip() print (\"Got a request of: %s\\n\" % self.data) self.data = self.data.decode('utf-8')", "with the License. # You may obtain a copy of the License at", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "binding to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate", "def handle_get(self): # reference: extracting URL from HTTP request via a regex #", "if url == '/deep': # 301 TODO is this hardcoding? If yes, whats", "the correct way? 
self.status = HTTPStatus.MOVED_PERMANENTLY return if not os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND", "law or agreed to in writing, software # distributed under the License is", "Python documentation examples thus # some of the code is Copyright © 2001-2013", "the License for the specific language governing permissions and # limitations under the", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond() return self.handle_get() self.respond() def respond(self): response = 'HTTP/1.1", "Rights Reserved # # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl", "self.content self.request.sendall(response.encode('utf-8')) def handle_get(self): # reference: extracting URL from HTTP request via a", "CWD = os.getcwd() HOME = os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def handle(self): self.data = self.request.recv(1024).strip()", "+ self.content self.request.sendall(response.encode('utf-8')) def handle_get(self): # reference: extracting URL from HTTP request via", "from the Python documentation examples thus # some of the code is Copyright", "\"__main__\": HOST, PORT = \"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server,", "Copyright © 2001-2013 Python Software # Foundation; All Rights Reserved # # http://docs.python.org/2/library/socketserver.html", "+= \"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8')) def handle_get(self): # reference: extracting URL from HTTP", "socketserver # Copyright 2013 <NAME>, <NAME> # # Licensed under the Apache License,", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "application/octet-stream self.content = None if not self.data.startswith('GET'): # only handling get for now", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the server; this will keep running", "respond(self): response = 'HTTP/1.1 ' + str(self.status.value) + ' ' + self.status.phrase response", "import re import os CWD = os.getcwd() HOME = os.path.abspath('./www') class MyWebServer(socketserver.BaseRequestHandler): def", "# Furthermore it is derived from the Python documentation examples thus # some", "elif url.endswith('.css'): self.mimetype = mimetypes.types_map['.css'] if __name__ == \"__main__\": HOST, PORT = \"localhost\",", "self.request.recv(1024).strip() print (\"Got a request of: %s\\n\" % self.data) self.data = self.data.decode('utf-8') self.status", "See the License for the specific language governing permissions and # limitations under", "# # http://docs.python.org/2/library/socketserver.html # # run: python freetests.py # try: curl -v -X", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "self.status = HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: # /www/ should be in", "self.request.sendall(response.encode('utf-8')) def handle_get(self): # reference: extracting URL from HTTP request via a regex", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "\"localhost\", 8080 socketserver.TCPServer.allow_reuse_address = True # Create the server, binding to localhost on", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "response += '\\r\\nContent-Type: ' + self.mimetype if self.content: response += \"\\n\\n\" + self.content", "<gh_stars>0 # coding: utf-8 import socketserver # Copyright 2013 <NAME>, <NAME> # #", "== \"__main__\": HOST, PORT = \"localhost\", 8080 
socketserver.TCPServer.allow_reuse_address = True # Create the", "# only handling get for now print('server doesnt serve non-GET requests yet') self.status", "%s\\n\" % self.data) self.data = self.data.decode('utf-8') self.status = HTTPStatus.OK self.mimetype = mimetypes.types_map['.a'] #unknown", "specific language governing permissions and # limitations under the License. # # #", "301 TODO is this hardcoding? If yes, whats the correct way? self.status =", "if url.endswith('/'): url += 'index.html' if url == '/deep': # 301 TODO is", "License. # # # Furthermore it is derived from the Python documentation examples", "and # limitations under the License. # # # Furthermore it is derived", "Version 2.0 (the \"License\"); # you may not use this file except in", "# reference: extracting URL from HTTP request via a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern", "except in compliance with the License. # You may obtain a copy of", "try: curl -v -X GET http://127.0.0.1:8080/ import mimetypes from http import HTTPStatus import", "not os.path.isfile(HOME+url): self.status = HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: # /www/ should", "# Create the server, binding to localhost on port 8080 server = socketserver.TCPServer((HOST,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "2013 <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "url.endswith('/'): url += 'index.html' if url == '/deep': # 301 TODO is this", "# Activate the server; this will keep running until you # interrupt the", "the code is Copyright © 2001-2013 Python Software # Foundation; All Rights Reserved", "server; this will keep running until you # interrupt the program with Ctrl-C", "HTTP request via a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request url_pattern = re.compile(\"^GET (.*)[ ].*\") url", "self.content = None if not self.data.startswith('GET'): # only handling get for now print('server", "in the current path self.status = HTTPStatus.NOT_FOUND return f = open(HOME+url) self.content =", "to localhost on port 8080 server = socketserver.TCPServer((HOST, PORT), MyWebServer) # Activate the", "for now print('server doesnt serve non-GET requests yet') self.status = HTTPStatus.METHOD_NOT_ALLOWED self.respond() return", "= True # Create the server, binding to localhost on port 8080 server", "self.content = f.read() f.close() if url.endswith('.html'): self.mimetype = mimetypes.types_map['.html'] elif url.endswith('.css'): self.mimetype =", "the specific language governing permissions and # limitations under the License. 
# #", "# /www/ should be in the current path self.status = HTTPStatus.NOT_FOUND return f", "handle_get(self): # reference: extracting URL from HTTP request via a regex # https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request", "HTTPStatus.NOT_FOUND return if os.path.abspath(HOME+url).find(CWD+\"/www/\") == -1: # /www/ should be in the current", "GET http://127.0.0.1:8080/ import mimetypes from http import HTTPStatus import re import os CWD", "+ str(self.status.value) + ' ' + self.status.phrase response += '\\r\\nContent-Type: ' + self.mimetype", "'HTTP/1.1 ' + str(self.status.value) + ' ' + self.status.phrase response += '\\r\\nContent-Type: '", "= re.compile(\"^GET (.*)[ ].*\") url = url_pattern.match(self.data).group(1) if url.endswith('/'): url += 'index.html' if", "the server; this will keep running until you # interrupt the program with", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "self.mimetype = mimetypes.types_map['.a'] #unknown type is application/octet-stream self.content = None if not self.data.startswith('GET'):", "limitations under the License. # # # Furthermore it is derived from the", "= self.data.decode('utf-8') self.status = HTTPStatus.OK self.mimetype = mimetypes.types_map['.a'] #unknown type is application/octet-stream self.content", "self.mimetype if self.content: response += \"\\n\\n\" + self.content self.request.sendall(response.encode('utf-8')) def handle_get(self): # reference:", "of: %s\\n\" % self.data) self.data = self.data.decode('utf-8') self.status = HTTPStatus.OK self.mimetype = mimetypes.types_map['.a']", "return self.handle_get() self.respond() def respond(self): response = 'HTTP/1.1 ' + str(self.status.value) + '" ]
[ "Test getting File Logger. # Override Config LogType to 'CONSOLE' which creates ConsoleLogger.", "def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map = {\"LogLevel\": \"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting()", "of Type FileLogger.\") # Test getting File Logger. # Override Config LogType to", "LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled Setting ONFAIL.\") # Test getting Logging Enabled Setting. #", "self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") # Test getting Log Directory.", "to 'CONSOLE' which creates ConsoleLogger. def test_getConsoleLoggerTest(self): new_value_map = {\"LogType\": \"CONSOLE\", \"Log\": \"YES\"}", "Setting. # Override Config to 'NO' def test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map,", "# @Test(singleThreaded = true) class LoggingConfigUnitTest(unittest.TestCase): # Test getting Logging Enabled Setting. #", "config = Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() # Test getting File Logger. # Override Config", "Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map = {\"Log\":", "Level Setting INFORMATION.\") # Test getting Logging Level Setting. # Override Config to", "test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting", "Logging Level Setting SUSPENDED.\") # Test getting Logging Level Setting with Illegal Argument.", "LogType to 'CONSOLE' which creates ConsoleLogger. 
def test_getConsoleLoggerTest(self): new_value_map = {\"LogType\": \"CONSOLE\", \"Log\":", "Test getting Log Directory. def test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__)) + \"\\\\Logs\" self.assertEquals(LoggingConfig().get_log_directory(), default_path,", "Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging Enabled Setting YES.\") # Test getting", "getting Logging Level Setting. # Override Config to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map =", "\"YES\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging Enabled Setting YES.\")", "Setting. # Override Config to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map,", "def test_getFileLoggerTest(self): new_value_map = {\"LogType\": \"TXT\", \"Log\": \"YES\"} config = Config().add_general_test_setting_values(new_value_map, True) file_name", "Level Setting. # Override Config to 'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\": \"ERROR\"}", "Logging Enabled Setting NO.\") # Test getting Logging Enabled Setting with an Illegal", "\"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting VERBOSE.\") # Test getting", "True) file_name = \"TestLog.txt\" logger = logging_config.get_logger(file_name) instance = isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected", "be of Type ConsoleLogger.\") # Test getting Log Directory. 
def test_getLogDirectoryTest(self): default_path =", "def test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__)) + \"\\\\Logs\" self.assertEquals(LoggingConfig().get_log_directory(), default_path, StringProcessor.safe_formatter(\"Expected Default Path '{}'.\",", "Logging Level Setting. # Override Config to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\":", "Log to 'NO' which creates ConsoleLogger by default. def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\":", "test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting", "test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map = {\"LogLevel\": \"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() #", "\"Expected Logging Level Setting SUCCESS.\") # Test getting Logging Level Setting. # Override", "import Config from utilities.StringProcessor import StringProcessor # Logging Configuration unit test class. 
#", "file_name = \"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name) instance = isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected Logger", "test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting", "to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected", "{\"LogType\": \"TXT\", \"Log\": \"YES\"} config = Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config =", "isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") # Test getting", "\"Expected Logging Level Setting WARNING.\") # Test getting Logging Level Setting. # Override", "# Override Config to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True)", "'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging", "Test getting Logging Enabled Setting. # Override Config to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map", "self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled Setting ONFAIL.\") # Test getting Logging Enabled Setting.", "getting Log Directory. 
def test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__)) + \"\\\\Logs\" self.assertEquals(LoggingConfig().get_log_directory(), default_path, StringProcessor.safe_formatter(\"Expected", "\"Expected Logging Enabled Setting NO.\") # Test getting Logging Enabled Setting with an", "Logging Enabled Setting. # Override Config to 'YES' def test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\":", "Override Config to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name,", "Logging Level Setting. # Override Config to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\":", "Directory. def test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__)) + \"\\\\Logs\" self.assertEquals(LoggingConfig().get_log_directory(), default_path, StringProcessor.safe_formatter(\"Expected Default Path", "Config to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(),", "which creates ConsoleLogger by default. def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True)", "Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() # Test getting File Logger. # Override Config LogType to", "with an Illegal Argument # Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def", "Setting. 
# Override Config to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\": \"ONFAIL\"} config", "SUSPENDED.\") # Test getting Logging Level Setting with Illegal Argument. # Override Config", "\"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting ERROR.\") # Test getting", "Setting with Illegal Argument. # Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def", "= {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name) instance =", "config = Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected", "'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map = {\"LogLevel\": \"INVALIDVALUE\"} config", "Test getting Logging Enabled Setting. # Override Config to 'NO' def test_getLoggingDisabledSettingTest(self): new_value_map", "def test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level", "= {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUCCESS.\") #", "Config LogType to 'CONSOLE' which creates ConsoleLogger. 
def test_getConsoleLoggerTest(self): new_value_map = {\"LogType\": \"CONSOLE\",", "new_value_map = {\"LogType\": \"TXT\", \"Log\": \"YES\"} config = Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\"", "new_value_map = {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUSPENDED.\")", "# Tests running in serial. # @Test(singleThreaded = true) class LoggingConfigUnitTest(unittest.TestCase): # Test", "LogType to 'TXT' which creates FileLogger. def test_getFileLoggerTest(self): new_value_map = {\"LogType\": \"TXT\", \"Log\":", "new_value_map = {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting VERBOSE.\")", "self.assertRaises(AttributeError): new_value_map = {\"LogLevel\": \"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() # Test getting", "Configuration unit test class. # Tests running in serial. # @Test(singleThreaded = true)", "Logging Level Setting GENERIC.\") # Test getting Logging Level Setting. # Override Config", "= {\"LogType\": \"TXT\", \"Log\": \"YES\"} config = Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config", "Enabled Setting NO.\") # Test getting Logging Enabled Setting with an Illegal Argument", "baseLogger.ConsoleLogger import ConsoleLogger from baseLogger.FileLogger import FileLogger from baseLogger.LoggingConfig import LoggingConfig from baseLogger.constants.LoggingEnabled", "Logging Enabled Setting. # Override Config to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\":", "getting Logging Level Setting. 
# Override Config to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map =", "ConsoleLogger from baseLogger.FileLogger import FileLogger from baseLogger.LoggingConfig import LoggingConfig from baseLogger.constants.LoggingEnabled import LoggingEnabled", "\"YES\"} config = Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger),", "from utilities.Config import Config from utilities.StringProcessor import StringProcessor # Logging Configuration unit test", "\"Expected Logging Level Setting GENERIC.\") # Test getting Logging Level Setting. # Override", "config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled Setting ONFAIL.\") #", "= {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting GENERIC.\") #", "Logging Enabled Setting YES.\") # Test getting Logging Enabled Setting. 
# Override Config", "- Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map = {\"LogLevel\": \"INVALIDVALUE\"} config =", "= \"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name) instance = isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to", "new_value_map = {\"LogLevel\": \"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() # Test getting File", "'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging", "FileLogger. def test_getFileLoggerTest(self): new_value_map = {\"LogType\": \"TXT\", \"Log\": \"YES\"} config = Config().add_general_test_setting_values(new_value_map, True)", "# Test getting Logging Level Setting. # Override Config to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self):", "True) LoggingConfig().get_logging_enabled_setting() # Test getting Logging Level Setting. # Override Config to 'VERBOSE'", "Level Setting. 
# Override Config to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\": \"VERBOSE\"}", "\"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name) instance = isinstance(logging_config, ConsoleLogger)", "= isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") # Test", "# Override Config to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True)", "def test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level", "LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting WARNING.\") # Test getting Logging Level Setting. #", "from utilities.StringProcessor import StringProcessor # Logging Configuration unit test class. # Tests running", "LoggingConfigUnitTest(unittest.TestCase): # Test getting Logging Enabled Setting. # Override Config to 'YES' def", "Test getting Logging Level Setting. # Override Config to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map", "Level Setting. # Override Config to 'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\": \"WARNING\"}", "\"Expected Logging Enabled Setting YES.\") # Test getting Logging Enabled Setting. 
# Override", "test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map = {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() # Test getting", "LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger to be of Type FileLogger.\") # Test getting", "File Logger. # Override Config Log to 'NO' which creates ConsoleLogger by default.", "Logging Level Setting. # Override Config to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\":", "creates ConsoleLogger by default. def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name", "Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting VERBOSE.\") # Test getting Logging", "# Test getting Logging Level Setting. # Override Config to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self):", "Logger to be of Type ConsoleLogger.\") # Test getting Log Directory. def test_getLogDirectoryTest(self):", "Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting INFORMATION.\") # Test getting Logging", "Setting. # Override Config to 'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map,", "running in serial. # @Test(singleThreaded = true) class LoggingConfigUnitTest(unittest.TestCase): # Test getting Logging", "getting File Logger. # Override Config LogType to 'CONSOLE' which creates ConsoleLogger. def", "to 'TXT' which creates FileLogger. 
def test_getFileLoggerTest(self): new_value_map = {\"LogType\": \"TXT\", \"Log\": \"YES\"}", "Level Setting with Illegal Argument. # Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException", "os import unittest from baseLogger.ConsoleLogger import ConsoleLogger from baseLogger.FileLogger import FileLogger from baseLogger.LoggingConfig", "\"CONSOLE\", \"Log\": \"YES\"} logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logger =", "to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected", "Override Config to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\": \"ONFAIL\"} config = Config()", "{\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting WARNING.\") # Test", "from baseLogger.constants.MessageType import MessageType from utilities.Config import Config from utilities.StringProcessor import StringProcessor #", "import os import unittest from baseLogger.ConsoleLogger import ConsoleLogger from baseLogger.FileLogger import FileLogger from", "with self.assertRaises(AttributeError): new_value_map = {\"LogLevel\": \"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() # Test", "IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map = {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() #", "Logging Level Setting. 
# Override Config to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\":", "self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting ERROR.\") # Test getting Logging Level Setting.", "Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled Setting ONFAIL.\") # Test getting", "logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logger = logging_config.get_logger(file_name) instance =", "INFORMATION.\") # Test getting Logging Level Setting. # Override Config to 'GENERIC' def", "LoggingConfig(config).get_logging_level_setting() # Test getting File Logger. # Override Config LogType to 'TXT' which", "to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\": \"ONFAIL\"} config = Config() config.add_general_test_setting_values(new_value_map, True)", "getting Logging Level Setting. # Override Config to 'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map =", "Override Config to 'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name,", "Config to 'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(),", "# Test getting File Logger. # Override Config LogType to 'TXT' which creates", "Level Setting ERROR.\") # Test getting Logging Level Setting. 
# Override Config to", "# Override Config to 'NO' def test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True)", "True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting INFORMATION.\") # Test getting Logging Level", "LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logger = logging_config.get_logger(file_name) instance = isinstance(logger, ConsoleLogger)", "Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map = {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting()", "ConsoleLogger by default. def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name =", "Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() # Test getting Logging Level Setting. # Override Config to", "True) LoggingConfig(config).get_logging_level_setting() # Test getting File Logger. 
# Override Config LogType to 'TXT'", "new_value_map = {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting ERROR.\")", "Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map = {\"LogLevel\":", "{\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting GENERIC.\") # Test", "= Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() # Test getting File Logger. # Override Config LogType", "# Test getting Logging Level Setting. # Override Config to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self):", "GENERIC.\") # Test getting Logging Level Setting. # Override Config to 'SUCCESS' def", "Level Setting. # Override Config to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\": \"GENERIC\"}", "Override Config to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name,", "= Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled Setting ONFAIL.\") # Test", "Test getting File Logger. 
# Override Config Log to 'NO' which creates ConsoleLogger", "\"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting GENERIC.\") # Test getting", "self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting VERBOSE.\") # Test getting Logging Level Setting.", "NO.\") # Test getting Logging Enabled Setting with an Illegal Argument # Override", "{\"LogType\": \"CONSOLE\", \"Log\": \"YES\"} logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logger", "Setting VERBOSE.\") # Test getting Logging Level Setting. # Override Config to 'INFORMATION'", "getting Logging Level Setting. # Override Config to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map =", "Override Config to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name,", "\"Expected Logger to be of Type FileLogger.\") # Test getting File Logger. #", "self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging Enabled Setting YES.\") # Test getting Logging Enabled Setting.", "StringProcessor # Logging Configuration unit test class. # Tests running in serial. #", "Logger. # Override Config LogType to 'CONSOLE' which creates ConsoleLogger. def test_getConsoleLoggerTest(self): new_value_map", "# Override Config to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True)", "Logger to be of Type ConsoleLogger.\") # Test getting File Logger. 
# Override", "Config to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(),", "LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUCCESS.\") # Test getting Logging Level Setting. #", "LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting VERBOSE.\") # Test getting Logging Level Setting. #", "logging_config.get_logger(file_name) instance = isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\")", "Type ConsoleLogger.\") # Test getting Log Directory. def test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__)) +", "to 'YES' def test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\": \"YES\"} config = Config() config.add_general_test_setting_values(new_value_map, True)", "# Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map", "= Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger", "Level Setting GENERIC.\") # Test getting Logging Level Setting. # Override Config to", "test_getFileLoggerTest(self): new_value_map = {\"LogType\": \"TXT\", \"Log\": \"YES\"} config = Config().add_general_test_setting_values(new_value_map, True) file_name =", "= {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() # Test getting Logging Level Setting. 
#", "def test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level", "to 'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected", "\"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting INFORMATION.\") # Test getting", "creates ConsoleLogger. def test_getConsoleLoggerTest(self): new_value_map = {\"LogType\": \"CONSOLE\", \"Log\": \"YES\"} logging_config = LoggingConfig()", "Logging Level Setting INFORMATION.\") # Test getting Logging Level Setting. # Override Config", "True) file_name = \"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name) instance = isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected", "# Test getting File Logger. # Override Config Log to 'NO' which creates", "getting Logging Enabled Setting with an Illegal Argument # Override Config to 'INVALIDVALUE'", "# Test getting Logging Level Setting. # Override Config to 'ERROR' def test_getLoggingLevelErrorSettingTest(self):", "Setting ERROR.\") # Test getting Logging Level Setting. 
# Override Config to 'SUSPENDED'", "to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected", "baseLogger.FileLogger import FileLogger from baseLogger.LoggingConfig import LoggingConfig from baseLogger.constants.LoggingEnabled import LoggingEnabled from baseLogger.constants.MessageType", "'TXT' which creates FileLogger. def test_getFileLoggerTest(self): new_value_map = {\"LogType\": \"TXT\", \"Log\": \"YES\"} config", "File Logger. # Override Config LogType to 'TXT' which creates FileLogger. def test_getFileLoggerTest(self):", "= \"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger to be of Type", "Logger. # Override Config LogType to 'TXT' which creates FileLogger. def test_getFileLoggerTest(self): new_value_map", "# Override Config to 'YES' def test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\": \"YES\"} config =", "Level Setting WARNING.\") # Test getting Logging Level Setting. 
# Override Config to", "'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging", "\"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger to be of Type FileLogger.\")", "baseLogger.LoggingConfig import LoggingConfig from baseLogger.constants.LoggingEnabled import LoggingEnabled from baseLogger.constants.MessageType import MessageType from utilities.Config", "= logging_config.get_logger(file_name) instance = isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type", "self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting WARNING.\") # Test getting Logging Level Setting.", "logging_config.add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logger = logging_config.get_logger(file_name) instance = isinstance(logger, ConsoleLogger) self.assertTrue(instance,", "Config to 'YES' def test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\": \"YES\"} config = Config() config.add_general_test_setting_values(new_value_map,", "\"TXT\", \"Log\": \"YES\"} config = Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name)", "getting Logging Enabled Setting. # Override Config to 'YES' def test_getLoggingEnabledSettingTest(self): new_value_map =", "Type ConsoleLogger.\") # Test getting File Logger. 
# Override Config Log to 'NO'", "new_value_map = {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting INFORMATION.\")", "True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging Enabled Setting YES.\") # Test getting Logging Enabled", "Config to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(),", "in serial. # @Test(singleThreaded = true) class LoggingConfigUnitTest(unittest.TestCase): # Test getting Logging Enabled", "test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting", "'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging", "= LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger to be of Type FileLogger.\") # Test", "logger = logging_config.get_logger(file_name) instance = isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of", "'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected 
Logging", "instance = isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") #", "def test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\": \"ONFAIL\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name,", "Level Setting. # Override Config to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\": \"SUSPENDED\"}", "def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config =", "True) file_name = \"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger to be", "\"YES\"} logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logger = logging_config.get_logger(file_name) instance", "isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") # Test getting", "Illegal Argument # Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with", "file_name = \"TestLog.txt\" logger = logging_config.get_logger(file_name) instance = isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected Logger", "{\"log\": \"YES\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging Enabled Setting", "be of Type ConsoleLogger.\") # Test getting File Logger. 
# Override Config Log", "new_value_map = {\"LogType\": \"CONSOLE\", \"Log\": \"YES\"} logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name =", "Logger. # Override Config Log to 'NO' which creates ConsoleLogger by default. def", "utilities.Config import Config from utilities.StringProcessor import StringProcessor # Logging Configuration unit test class.", "= {\"Log\": \"ONFAIL\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled", "new_value_map = {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() # Test getting Logging Level Setting.", "# Override Config to 'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True)", "LoggingEnabled.YES.name, \"Expected Logging Enabled Setting YES.\") # Test getting Logging Enabled Setting. #", "Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting ERROR.\") # Test getting Logging", "import unittest from baseLogger.ConsoleLogger import ConsoleLogger from baseLogger.FileLogger import FileLogger from baseLogger.LoggingConfig import", "to be of Type FileLogger.\") # Test getting File Logger. # Override Config", "getting File Logger. 
# Override Config Log to 'NO' which creates ConsoleLogger by", "new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name) instance", "Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name) instance = isinstance(logging_config, ConsoleLogger) self.assertTrue(instance,", "Test getting Logging Level Setting. # Override Config to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map", "Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map =", "def test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\": \"YES\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name,", "ERROR.\") # Test getting Logging Level Setting. # Override Config to 'SUSPENDED' def", "def test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level", "Logging Level Setting. # Override Config to 'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\":", "\"Expected Logging Enabled Setting ONFAIL.\") # Test getting Logging Enabled Setting. # Override", "to 'NO' which creates ConsoleLogger by default. def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\": \"NO\"}", "default. 
def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config", "creates FileLogger. def test_getFileLoggerTest(self): new_value_map = {\"LogType\": \"TXT\", \"Log\": \"YES\"} config = Config().add_general_test_setting_values(new_value_map,", "Test getting Logging Level Setting. # Override Config to 'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map", "Illegal Argument. # Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with", "# Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map", "\"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() # Test getting File Logger. # Override", "'CONSOLE' which creates ConsoleLogger. 
def test_getConsoleLoggerTest(self): new_value_map = {\"LogType\": \"CONSOLE\", \"Log\": \"YES\"} logging_config", "'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging", "\"Expected Logging Level Setting SUSPENDED.\") # Test getting Logging Level Setting with Illegal", "= Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging Enabled Setting YES.\") # Test", "Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging Enabled Setting NO.\") # Test getting Logging", "utilities.StringProcessor import StringProcessor # Logging Configuration unit test class. # Tests running in", "VERBOSE.\") # Test getting Logging Level Setting. # Override Config to 'INFORMATION' def", "getting Logging Enabled Setting. # Override Config to 'NO' def test_getLoggingDisabledSettingTest(self): new_value_map =", "FileLogger), \"Expected Logger to be of Type FileLogger.\") # Test getting File Logger.", "IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map = {\"LogLevel\": \"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map, True)", "Test getting File Logger. # Override Config LogType to 'TXT' which creates FileLogger.", "# Test getting Logging Enabled Setting. 
# Override Config to 'YES' def test_getLoggingEnabledSettingTest(self):", "config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging Enabled Setting YES.\") #", "to be of Type ConsoleLogger.\") # Test getting Log Directory. def test_getLogDirectoryTest(self): default_path", "new_value_map = {\"Log\": \"ONFAIL\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging", "SUCCESS.\") # Test getting Logging Level Setting. # Override Config to 'WARNING' def", "Setting. # Override Config to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map,", "self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUCCESS.\") # Test getting Logging Level Setting.", "file_name = \"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger to be of", "# Override Config to 'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True)", "ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") # Test getting Log", "Config to 'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(),", "Override Config LogType to 'TXT' which creates FileLogger. 
def test_getFileLoggerTest(self): new_value_map = {\"LogType\":", "be of Type FileLogger.\") # Test getting File Logger. # Override Config LogType", "Config to 'NO' def test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name,", "Setting SUCCESS.\") # Test getting Logging Level Setting. # Override Config to 'WARNING'", "Logging Configuration unit test class. # Tests running in serial. # @Test(singleThreaded =", "def test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level", "YES.\") # Test getting Logging Enabled Setting. # Override Config to 'ONFAIL' def", "\"Expected Logger to be of Type ConsoleLogger.\") # Test getting File Logger. #", "self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") # Test getting File Logger.", "= {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUSPENDED.\") #", "{\"LogLevel\": \"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() # Test getting File Logger. #", "class. # Tests running in serial. 
# @Test(singleThreaded = true) class LoggingConfigUnitTest(unittest.TestCase): #", "to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected", "with self.assertRaises(NotImplementedError): new_value_map = {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() # Test getting Logging", "File Logger. # Override Config LogType to 'CONSOLE' which creates ConsoleLogger. def test_getConsoleLoggerTest(self):", "to 'NO' def test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected", "self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger to be of Type FileLogger.\") # Test getting File", "Logging Enabled Setting with an Illegal Argument # Override Config to 'INVALIDVALUE' -", "which creates ConsoleLogger. def test_getConsoleLoggerTest(self): new_value_map = {\"LogType\": \"CONSOLE\", \"Log\": \"YES\"} logging_config =", "# Test getting Log Directory. def test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__)) + \"\\\\Logs\" self.assertEquals(LoggingConfig().get_log_directory(),", "to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map = {\"LogLevel\": \"INVALIDVALUE\"}", "Logger to be of Type FileLogger.\") # Test getting File Logger. # Override", "LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting INFORMATION.\") # Test getting Logging Level Setting. 
#", "from baseLogger.LoggingConfig import LoggingConfig from baseLogger.constants.LoggingEnabled import LoggingEnabled from baseLogger.constants.MessageType import MessageType from", "Test getting Logging Enabled Setting with an Illegal Argument # Override Config to", "test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging Enabled Setting", "Logging Level Setting SUCCESS.\") # Test getting Logging Level Setting. # Override Config", "of Type ConsoleLogger.\") # Test getting Log Directory. def test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__))", "new_value_map = {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting GENERIC.\")", "= {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting ERROR.\") #", "@Test(singleThreaded = true) class LoggingConfigUnitTest(unittest.TestCase): # Test getting Logging Enabled Setting. # Override", "LoggingEnabled from baseLogger.constants.MessageType import MessageType from utilities.Config import Config from utilities.StringProcessor import StringProcessor", "Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map =", "true) class LoggingConfigUnitTest(unittest.TestCase): # Test getting Logging Enabled Setting. 
# Override Config to", "= {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting WARNING.\") #", "Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger to", "= {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting VERBOSE.\") #", "{\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUSPENDED.\") # Test", "{\"Log\": \"ONFAIL\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled Setting", "FileLogger.\") # Test getting File Logger. # Override Config LogType to 'CONSOLE' which", "'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map = {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map,", "Logging Level Setting. 
# Override Config to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\":", "ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") # Test getting File", "LoggingConfig from baseLogger.constants.LoggingEnabled import LoggingEnabled from baseLogger.constants.MessageType import MessageType from utilities.Config import Config", "{\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging Enabled Setting NO.\") # Test", "Setting INFORMATION.\") # Test getting Logging Level Setting. # Override Config to 'GENERIC'", "Setting. # Override Config to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map,", "'YES' def test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\": \"YES\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()),", "\"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting WARNING.\") # Test getting", "Test getting Logging Level Setting. # Override Config to 'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map", "import FileLogger from baseLogger.LoggingConfig import LoggingConfig from baseLogger.constants.LoggingEnabled import LoggingEnabled from baseLogger.constants.MessageType import", "unit test class. # Tests running in serial. # @Test(singleThreaded = true) class", "# Test getting Logging Level Setting with Illegal Argument. # Override Config to", "\"Expected Logger to be of Type ConsoleLogger.\") # Test getting Log Directory. 
def", "to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map = {\"Log\": \"INVALIDVALUE\"}", "Override Config LogType to 'CONSOLE' which creates ConsoleLogger. def test_getConsoleLoggerTest(self): new_value_map = {\"LogType\":", "Logging Level Setting VERBOSE.\") # Test getting Logging Level Setting. # Override Config", "test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting", "Type FileLogger.\") # Test getting File Logger. # Override Config LogType to 'CONSOLE'", "True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting WARNING.\") # Test getting Logging Level", "Test getting Logging Level Setting with Illegal Argument. # Override Config to 'INVALIDVALUE'", "to be of Type ConsoleLogger.\") # Test getting File Logger. # Override Config", "WARNING.\") # Test getting Logging Level Setting. # Override Config to 'ERROR' def", "Setting WARNING.\") # Test getting Logging Level Setting. # Override Config to 'ERROR'", "ConsoleLogger.\") # Test getting File Logger. # Override Config Log to 'NO' which", "Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting GENERIC.\") # Test getting Logging", "LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting GENERIC.\") # Test getting Logging Level Setting. 
#", "\"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUSPENDED.\") # Test getting", "{\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUCCESS.\") # Test", "getting Logging Level Setting. # Override Config to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map =", "to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected", "Logging Level Setting ERROR.\") # Test getting Logging Level Setting. # Override Config", "self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUSPENDED.\") # Test getting Logging Level Setting", "from baseLogger.ConsoleLogger import ConsoleLogger from baseLogger.FileLogger import FileLogger from baseLogger.LoggingConfig import LoggingConfig from", "from baseLogger.constants.LoggingEnabled import LoggingEnabled from baseLogger.constants.MessageType import MessageType from utilities.Config import Config from", "self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging Enabled Setting NO.\") # Test getting Logging Enabled Setting", "# Test getting Logging Level Setting. 
# Override Config to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self):", "instance = isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") #", "'NO' def test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging", "Enabled Setting YES.\") # Test getting Logging Enabled Setting. # Override Config to", "Argument # Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError):", "Logging Level Setting. # Override Config to 'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\":", "by default. def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\"", "Level Setting SUSPENDED.\") # Test getting Logging Level Setting with Illegal Argument. #", "Logging Level Setting with Illegal Argument. # Override Config to 'INVALIDVALUE' - Expect", "True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled Setting ONFAIL.\") # Test getting Logging Enabled", "from baseLogger.FileLogger import FileLogger from baseLogger.LoggingConfig import LoggingConfig from baseLogger.constants.LoggingEnabled import LoggingEnabled from", "new_value_map = {\"log\": \"YES\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging", "# Test getting Logging Enabled Setting. # Override Config to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self):", "getting Logging Enabled Setting. 
# Override Config to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map =", "getting Logging Level Setting. # Override Config to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map =", "getting Logging Level Setting. # Override Config to 'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map =", "# Test getting Logging Enabled Setting. # Override Config to 'NO' def test_getLoggingDisabledSettingTest(self):", "Setting NO.\") # Test getting Logging Enabled Setting with an Illegal Argument #", "Test getting Logging Level Setting. # Override Config to 'GENERIC' def test_getLoggingLevelGenericSettingTest(self): new_value_map", "= isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\") # Test", "baseLogger.constants.MessageType import MessageType from utilities.Config import Config from utilities.StringProcessor import StringProcessor # Logging", "Logging Enabled Setting. # Override Config to 'NO' def test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\":", "= {\"LogType\": \"CONSOLE\", \"Log\": \"YES\"} logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\"", "True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting ERROR.\") # Test getting Logging Level", "ConsoleLogger.\") # Test getting Log Directory. def test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__)) + \"\\\\Logs\"", "Logging Level Setting WARNING.\") # Test getting Logging Level Setting. 
# Override Config", "baseLogger.constants.LoggingEnabled import LoggingEnabled from baseLogger.constants.MessageType import MessageType from utilities.Config import Config from utilities.StringProcessor", "\"TestLog.txt\" logger = logging_config.get_logger(file_name) instance = isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be", "test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name)", "new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging Enabled Setting NO.\")", "= {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting INFORMATION.\") #", "MessageType from utilities.Config import Config from utilities.StringProcessor import StringProcessor # Logging Configuration unit", "\"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name) instance = isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be", "Config to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(),", "Setting. 
# Override Config to 'YES' def test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\": \"YES\"} config", "= {\"LogLevel\": \"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map, True) LoggingConfig(config).get_logging_level_setting() # Test getting File Logger.", "\"Log\": \"YES\"} logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logger = logging_config.get_logger(file_name)", "with Illegal Argument. # Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self):", "import ConsoleLogger from baseLogger.FileLogger import FileLogger from baseLogger.LoggingConfig import LoggingConfig from baseLogger.constants.LoggingEnabled import", "Test getting Logging Enabled Setting. # Override Config to 'YES' def test_getLoggingEnabledSettingTest(self): new_value_map", "Setting YES.\") # Test getting Logging Enabled Setting. # Override Config to 'ONFAIL'", "True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUSPENDED.\") # Test getting Logging Level", "= \"TestLog.txt\" logger = logging_config.get_logger(file_name) instance = isinstance(logger, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to", "Config to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(),", "config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging Enabled Setting YES.\") # Test getting Logging", "Argument. 
# Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError):", "Config to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\": \"ONFAIL\"} config = Config() config.add_general_test_setting_values(new_value_map,", "Enabled Setting with an Illegal Argument # Override Config to 'INVALIDVALUE' - Expect", "LoggingConfig().get_logging_enabled_setting() # Test getting Logging Level Setting. # Override Config to 'VERBOSE' def", "unittest from baseLogger.ConsoleLogger import ConsoleLogger from baseLogger.FileLogger import FileLogger from baseLogger.LoggingConfig import LoggingConfig", "import MessageType from utilities.Config import Config from utilities.StringProcessor import StringProcessor # Logging Configuration", "True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging Enabled Setting NO.\") # Test getting Logging Enabled", "Setting ONFAIL.\") # Test getting Logging Enabled Setting. # Override Config to 'NO'", "= LoggingConfig().get_logger(file_name) instance = isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type", "LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting ERROR.\") # Test getting Logging Level Setting. 
#", "def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map = {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() # Test", "{\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting VERBOSE.\") # Test", "FileLogger from baseLogger.LoggingConfig import LoggingConfig from baseLogger.constants.LoggingEnabled import LoggingEnabled from baseLogger.constants.MessageType import MessageType", "Config Log to 'NO' which creates ConsoleLogger by default. def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map =", "'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\": \"ONFAIL\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config),", "Override Config Log to 'NO' which creates ConsoleLogger by default. def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map", "new_value_map = {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUCCESS.\")", "import StringProcessor # Logging Configuration unit test class. # Tests running in serial.", "config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled Setting ONFAIL.\") # Test getting Logging", "Config from utilities.StringProcessor import StringProcessor # Logging Configuration unit test class. # Tests", "# Override Config LogType to 'CONSOLE' which creates ConsoleLogger. 
def test_getConsoleLoggerTest(self): new_value_map =", "\"ONFAIL\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected Logging Enabled Setting ONFAIL.\")", "test_getConsoleLoggerTest(self): new_value_map = {\"LogType\": \"CONSOLE\", \"Log\": \"YES\"} logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name", "Override Config to 'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name,", "Tests running in serial. # @Test(singleThreaded = true) class LoggingConfigUnitTest(unittest.TestCase): # Test getting", "'SUSPENDED' def test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging", "Override Config to 'YES' def test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\": \"YES\"} config = Config()", "Level Setting. 
# Override Config to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\": \"SUCCESS\"}", "Override Config to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.VERBOSE.name,", "Override Config to 'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name,", "True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting GENERIC.\") # Test getting Logging Level", "Enabled Setting ONFAIL.\") # Test getting Logging Enabled Setting. # Override Config to", "\"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() # Test getting Logging Level Setting. # Override Config", "getting Logging Level Setting with Illegal Argument. # Override Config to 'INVALIDVALUE' -", "Setting GENERIC.\") # Test getting Logging Level Setting. # Override Config to 'SUCCESS'", "to 'ERROR' def test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected", "an Illegal Argument # Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self):", "Setting. # Override Config to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map,", "# Logging Configuration unit test class. # Tests running in serial. 
# @Test(singleThreaded", "def test_getLoggingLevelSuspendedSettingTest(self): new_value_map = {\"LogLevel\": \"SUSPENDED\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level", "'NO' which creates ConsoleLogger by default. def test_getConsoleLoggerLoggingDisabledTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map,", "Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUCCESS.\") # Test getting Logging", "# Override Config to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\": \"ONFAIL\"} config =", "Test getting Logging Level Setting. # Override Config to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map", "Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting WARNING.\") # Test getting Logging", "# Test getting Logging Level Setting. # Override Config to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self):", "= true) class LoggingConfigUnitTest(unittest.TestCase): # Test getting Logging Enabled Setting. # Override Config", "Setting SUSPENDED.\") # Test getting Logging Level Setting with Illegal Argument. # Override", "test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting", "Level Setting SUCCESS.\") # Test getting Logging Level Setting. 
# Override Config to", "Expect IllegalArgumentException def test_getLoggingLevelIllegalArgumentTest(self): with self.assertRaises(AttributeError): new_value_map = {\"LogLevel\": \"INVALIDVALUE\"} config = Config().add_general_test_setting_values(new_value_map,", "test class. # Tests running in serial. # @Test(singleThreaded = true) class LoggingConfigUnitTest(unittest.TestCase):", "= {\"log\": \"YES\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected Logging Enabled", "Enabled Setting. # Override Config to 'ONFAIL' def test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\": \"ONFAIL\"}", "Test getting Logging Level Setting. # Override Config to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map", "Override Config to 'NO' def test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(),", "Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUSPENDED.\") # Test getting Logging", "- Expect IllegalArgumentException def test_getLoggingSettingIllegalArgumentTest(self): with self.assertRaises(NotImplementedError): new_value_map = {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True)", "test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__)) + \"\\\\Logs\" self.assertEquals(LoggingConfig().get_log_directory(), default_path, StringProcessor.safe_formatter(\"Expected Default Path '{}'.\", default_path))", "getting File Logger. # Override Config LogType to 'TXT' which creates FileLogger. 
def", "{\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() # Test getting Logging Level Setting. # Override", "test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\": \"YES\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name, \"Expected", "Enabled Setting. # Override Config to 'YES' def test_getLoggingEnabledSettingTest(self): new_value_map = {\"log\": \"YES\"}", "Enabled Setting. # Override Config to 'NO' def test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\": \"NO\"}", "which creates FileLogger. def test_getFileLoggerTest(self): new_value_map = {\"LogType\": \"TXT\", \"Log\": \"YES\"} config =", "LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUSPENDED.\") # Test getting Logging Level Setting with", "self.assertRaises(NotImplementedError): new_value_map = {\"Log\": \"INVALIDVALUE\"} Config().add_general_test_setting_values(new_value_map, True) LoggingConfig().get_logging_enabled_setting() # Test getting Logging Level", "import LoggingEnabled from baseLogger.constants.MessageType import MessageType from utilities.Config import Config from utilities.StringProcessor import", "logging_config = LoggingConfig().get_logger(file_name) instance = isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of", "{\"LogLevel\": \"INFORMATION\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting INFORMATION.\") # Test", "of Type ConsoleLogger.\") # Test getting File Logger. # Override Config Log to", "# Override Config Log to 'NO' which creates ConsoleLogger by default. 
def test_getConsoleLoggerLoggingDisabledTest(self):", "LoggingEnabled.NO.name, \"Expected Logging Enabled Setting NO.\") # Test getting Logging Enabled Setting with", "def test_getLoggingDisabledSettingTest(self): new_value_map = {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging Enabled", "True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUCCESS.\") # Test getting Logging Level", "import LoggingConfig from baseLogger.constants.LoggingEnabled import LoggingEnabled from baseLogger.constants.MessageType import MessageType from utilities.Config import", "\"Expected Logging Level Setting INFORMATION.\") # Test getting Logging Level Setting. # Override", "ConsoleLogger. def test_getConsoleLoggerTest(self): new_value_map = {\"LogType\": \"CONSOLE\", \"Log\": \"YES\"} logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map,", "True) self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting VERBOSE.\") # Test getting Logging Level", "serial. 
# @Test(singleThreaded = true) class LoggingConfigUnitTest(unittest.TestCase): # Test getting Logging Enabled Setting.", "{\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting ERROR.\") # Test", "test_getLoggingLevelErrorSettingTest(self): new_value_map = {\"LogLevel\": \"ERROR\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting", "self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting GENERIC.\") # Test getting Logging Level Setting.", "Config LogType to 'TXT' which creates FileLogger. def test_getFileLoggerTest(self): new_value_map = {\"LogType\": \"TXT\",", "self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting INFORMATION.\") # Test getting Logging Level Setting.", "# Test getting Logging Enabled Setting with an Illegal Argument # Override Config", "LoggingConfig().get_logger(file_name) instance = isinstance(logging_config, ConsoleLogger) self.assertTrue(instance, \"Expected Logger to be of Type ConsoleLogger.\")", "Setting. 
# Override Config to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map,", "\"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting SUCCESS.\") # Test getting", "new_value_map = {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level Setting WARNING.\")", "Level Setting. # Override Config to 'INFORMATION' def test_getLoggingLevelInformationSettingTest(self): new_value_map = {\"LogLevel\": \"INFORMATION\"}", "\"Log\": \"YES\"} config = Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config,", "\"Expected Logging Level Setting VERBOSE.\") # Test getting Logging Level Setting. # Override", "\"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging Enabled Setting NO.\") # Test getting", "def test_getLoggingLevelGenericSettingTest(self): new_value_map = {\"LogLevel\": \"GENERIC\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(), \"Expected Logging Level", "Level Setting VERBOSE.\") # Test getting Logging Level Setting. # Override Config to", "class LoggingConfigUnitTest(unittest.TestCase): # Test getting Logging Enabled Setting. # Override Config to 'YES'", "ONFAIL.\") # Test getting Logging Enabled Setting. 
# Override Config to 'NO' def", "= LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logger = logging_config.get_logger(file_name) instance = isinstance(logger,", "# Override Config to 'VERBOSE' def test_getLoggingLevelVerboseSettingTest(self): new_value_map = {\"LogLevel\": \"VERBOSE\"} Config().add_general_test_setting_values(new_value_map, True)", "logging_config = LoggingConfig(config).get_logger(file_name) self.assertTrue(isinstance(logging_config, FileLogger), \"Expected Logger to be of Type FileLogger.\") #", "= {\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name, \"Expected Logging Enabled Setting NO.\") #", "\"Expected Logging Level Setting ERROR.\") # Test getting Logging Level Setting. # Override", "# Test getting Logging Level Setting. # Override Config to 'WARNING' def test_getLoggingLevelWarningSettingTest(self):", "# Override Config LogType to 'TXT' which creates FileLogger. def test_getFileLoggerTest(self): new_value_map =", "test_getLoggingEnabledOnFailSettingTest(self): new_value_map = {\"Log\": \"ONFAIL\"} config = Config() config.add_general_test_setting_values(new_value_map, True) self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name, \"Expected", "Setting with an Illegal Argument # Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException", "{\"Log\": \"NO\"} Config().add_general_test_setting_values(new_value_map, True) file_name = \"TestLog.txt\" logging_config = LoggingConfig().get_logger(file_name) instance = isinstance(logging_config,", "def test_getConsoleLoggerTest(self): new_value_map = {\"LogType\": \"CONSOLE\", \"Log\": \"YES\"} logging_config = LoggingConfig() logging_config.add_general_test_setting_values(new_value_map, True)", "Log Directory. 
def test_getLogDirectoryTest(self): default_path = os.path.abspath(os.path.dirname(__file__)) + \"\\\\Logs\" self.assertEquals(LoggingConfig().get_log_directory(), default_path, StringProcessor.safe_formatter(\"Expected Default", "# Override Config to 'SUCCESS' def test_getLoggingLevelSuccessSettingTest(self): new_value_map = {\"LogLevel\": \"SUCCESS\"} Config().add_general_test_setting_values(new_value_map, True)", "# Test getting File Logger. # Override Config LogType to 'CONSOLE' which creates", "Setting. # Override Config to 'WARNING' def test_getLoggingLevelWarningSettingTest(self): new_value_map = {\"LogLevel\": \"WARNING\"} Config().add_general_test_setting_values(new_value_map,", "Logging Enabled Setting ONFAIL.\") # Test getting Logging Enabled Setting. # Override Config" ]
[ "fpcs = [] for ncenters, ax in enumerate(axes1.reshape(-1), 2): cntr, u, u0, d,", "for pt in cntr: ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers = {0}; FPC = {1:.2f}'.format(ncenters,", "xpts = np.zeros(len(y)) ypts = np.zeros(len(y)) labels = np.zeros(len(y)) # no labels #", "points x2 clusters.') plt.show() # Set up the loop and plot fig1, axes1", "[line.split() for line in textFile] y = np.array(y) X = np.zeros(shape=(200,2)) # stores", "the center of each fuzzy cluster for pt in cntr: ax.plot(pt[0], pt[1], 'rs')", "pt in cntr: ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers = {0}; FPC = {1:.2f}'.format(ncenters, fpc))", "\"\\n\") # u0 is the array of the memberiship functions for i in", "fuzz colors = ['b', 'orange', 'g', 'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen']", "j], ypts[cluster_membership == j], '.', color=colors[j]) # Mark the center of each fuzzy", "#number of clusters print(\"Cluster: \", j, \"\\n\", u0[j][i]) #membership for cluster print() #", "enumerate(axes1.reshape(-1), 2): cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans( alldata, ncenters,", "'ForestGreen'] # Insert his test data instead !!!! # Then our data #", "rows][0] for i in range (0, len(y)): xpts[i] = X[i][0] # ypts =", "data point in training set cluster_membership = np.argmax(u, axis=0) for j in range(ncenters):", "cuz theyre not labeled... ax0.plot(xpts[labels == label], ypts[labels == label], '.', color=colors[label]) ax0.set_title('Test", "kinds of labels, only have 1 cuz theyre not labeled... 
ax0.plot(xpts[labels == label],", "p, fpc = fuzz.cluster.cmeans( alldata, ncenters, 2, error=0.005, maxiter=1000, init=None) print(\"Centers = \",", "for i in range (len(y)): # columns print (\"Data point: \",xpts[i], \",\", ypts[i])", "colors = ['b', 'orange', 'g', 'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen'] #", "rows for j in range(0,len(y[0])): # num columns X[i,j] = float(y[i,j]) xpts =", "= x[all rows][0] for i in range (0, len(y)): xpts[i] = X[i][0] #", "for i in range (0, len(y)): ypts[i] = X[i][1] # Visualize the test", "in range (len(y)): # columns print (\"Data point: \",xpts[i], \",\", ypts[i]) #data point", "8)) #number of figures alldata = np.vstack((xpts, ypts)) fpcs = [] for ncenters,", "Mark the center of each fuzzy cluster for pt in cntr: ax.plot(pt[0], pt[1],", "2 different kinds of labels, only have 1 cuz theyre not labeled... ax0.plot(xpts[labels", "for j in range(ncenters): #number of clusters print(\"Cluster: \", j, \"\\n\", u0[j][i]) #membership", "i in range (0, len(y)): ypts[i] = X[i][1] # Visualize the test data", "and plot fig1, axes1 = plt.subplots(2, 1, figsize=(8, 8)) #number of figures alldata", "textFile: y = [line.split() for line in textFile] y = np.array(y) X =", "each data point in training set cluster_membership = np.argmax(u, axis=0) for j in", "print(\"Centers = \", str(ncenters), \"\\n\") # u0 is the array of the memberiship", "fig0, ax0 = plt.subplots() for label in range(2): # need 2 different kinds", "figures alldata = np.vstack((xpts, ypts)) fpcs = [] for ncenters, ax in enumerate(axes1.reshape(-1),", "test data fig0, ax0 = plt.subplots() for label in range(2): # need 2", "jm, p, fpc = fuzz.cluster.cmeans( alldata, ncenters, 2, error=0.005, maxiter=1000, init=None) print(\"Centers =", "training set cluster_membership = np.argmax(u, axis=0) for j in range(ncenters): ax.plot(xpts[cluster_membership == j],", "ypts = x[all rows][1] for i in range (0, len(y)): ypts[i] = X[i][1]", "ypts[i]) #data point print(\"Membership: 
\") for j in range(ncenters): #number of clusters print(\"Cluster:", "each fuzzy cluster for pt in cntr: ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers = {0};", "textFile] y = np.array(y) X = np.zeros(shape=(200,2)) # stores test data as number", "\", str(ncenters), \"\\n\") # u0 is the array of the memberiship functions for", "i in range (len(y)): # columns print (\"Data point: \",xpts[i], \",\", ypts[i]) #data", "strings) for i in range(0,len(y)): # num rows for j in range(0,len(y[0])): #", "alldata = np.vstack((xpts, ypts)) fpcs = [] for ncenters, ax in enumerate(axes1.reshape(-1), 2):", "the array of the memberiship functions for i in range (len(y)): # columns", "\"\\n\", u0[j][i]) #membership for cluster print() # Store fpc values for later fpcs.append(fpc)", "= np.zeros(len(y)) # no labels # xpts = x[all rows][0] for i in", "x[all rows][1] for i in range (0, len(y)): ypts[i] = X[i][1] # Visualize", "d, jm, p, fpc = fuzz.cluster.cmeans( alldata, ncenters, 2, error=0.005, maxiter=1000, init=None) print(\"Centers", "= np.zeros(shape=(200,2)) # stores test data as number in array X (converts from", "plt import skfuzzy as fuzz colors = ['b', 'orange', 'g', 'r', 'c', 'm',", "Set up the loop and plot fig1, axes1 = plt.subplots(2, 1, figsize=(8, 8))", "# Store fpc values for later fpcs.append(fpc) # Plot assigned clusters, for each", "'c', 'm', 'y', 'k', 'Brown', 'ForestGreen'] # Insert his test data instead !!!!", "j in range(ncenters): ax.plot(xpts[cluster_membership == j], ypts[cluster_membership == j], '.', color=colors[j]) # Mark", "fuzz.cluster.cmeans( alldata, ncenters, 2, error=0.005, maxiter=1000, init=None) print(\"Centers = \", str(ncenters), \"\\n\") #", "'orange', 'g', 'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen'] # Insert his test", "the test data fig0, ax0 = plt.subplots() for label in range(2): # need", "different kinds of labels, only have 1 cuz theyre not labeled... 
ax0.plot(xpts[labels ==", "== label], ypts[labels == label], '.', color=colors[label]) ax0.set_title('Test data: 200 points x2 clusters.')", "array of the memberiship functions for i in range (len(y)): # columns print", "theyre not labeled... ax0.plot(xpts[labels == label], ypts[labels == label], '.', color=colors[label]) ax0.set_title('Test data:", "data fig0, ax0 = plt.subplots() for label in range(2): # need 2 different", "x2 clusters.') plt.show() # Set up the loop and plot fig1, axes1 =", "up the loop and plot fig1, axes1 = plt.subplots(2, 1, figsize=(8, 8)) #number", "(\"Data point: \",xpts[i], \",\", ypts[i]) #data point print(\"Membership: \") for j in range(ncenters):", "ax0.plot(xpts[labels == label], ypts[labels == label], '.', color=colors[label]) ax0.set_title('Test data: 200 points x2", "= fuzz.cluster.cmeans( alldata, ncenters, 2, error=0.005, maxiter=1000, init=None) print(\"Centers = \", str(ncenters), \"\\n\")", "x[all rows][0] for i in range (0, len(y)): xpts[i] = X[i][0] # ypts", "assigned clusters, for each data point in training set cluster_membership = np.argmax(u, axis=0)", "maxiter=1000, init=None) print(\"Centers = \", str(ncenters), \"\\n\") # u0 is the array of", "#number of figures alldata = np.vstack((xpts, ypts)) fpcs = [] for ncenters, ax", "data instead !!!! # Then our data # Collect Test Data with open(\"testFun.dat\")", "= np.zeros(len(y)) labels = np.zeros(len(y)) # no labels # xpts = x[all rows][0]", "# Visualize the test data fig0, ax0 = plt.subplots() for label in range(2):", "y = np.array(y) X = np.zeros(shape=(200,2)) # stores test data as number in", "range(ncenters): #number of clusters print(\"Cluster: \", j, \"\\n\", u0[j][i]) #membership for cluster print()", "alldata, ncenters, 2, error=0.005, maxiter=1000, init=None) print(\"Centers = \", str(ncenters), \"\\n\") # u0", "instead !!!! 
# Then our data # Collect Test Data with open(\"testFun.dat\") as", "# Collect Test Data with open(\"testFun.dat\") as textFile: y = [line.split() for line", "Insert his test data instead !!!! # Then our data # Collect Test", "axis=0) for j in range(ncenters): ax.plot(xpts[cluster_membership == j], ypts[cluster_membership == j], '.', color=colors[j])", "\",xpts[i], \",\", ypts[i]) #data point print(\"Membership: \") for j in range(ncenters): #number of", "float(y[i,j]) xpts = np.zeros(len(y)) ypts = np.zeros(len(y)) labels = np.zeros(len(y)) # no labels", "Collect Test Data with open(\"testFun.dat\") as textFile: y = [line.split() for line in", "# need 2 different kinds of labels, only have 1 cuz theyre not", "data # Collect Test Data with open(\"testFun.dat\") as textFile: y = [line.split() for", "print() # Store fpc values for later fpcs.append(fpc) # Plot assigned clusters, for", "num columns X[i,j] = float(y[i,j]) xpts = np.zeros(len(y)) ypts = np.zeros(len(y)) labels =", "fpcs.append(fpc) # Plot assigned clusters, for each data point in training set cluster_membership", "test data as number in array X (converts from strings) for i in", "= [] for ncenters, ax in enumerate(axes1.reshape(-1), 2): cntr, u, u0, d, jm,", "point in training set cluster_membership = np.argmax(u, axis=0) for j in range(ncenters): ax.plot(xpts[cluster_membership", "# Set up the loop and plot fig1, axes1 = plt.subplots(2, 1, figsize=(8,", "= X[i][0] # ypts = x[all rows][1] for i in range (0, len(y)):", "labeled... 
ax0.plot(xpts[labels == label], ypts[labels == label], '.', color=colors[label]) ax0.set_title('Test data: 200 points", "'.', color=colors[label]) ax0.set_title('Test data: 200 points x2 clusters.') plt.show() # Set up the", "plt.show() # Set up the loop and plot fig1, axes1 = plt.subplots(2, 1,", "our data # Collect Test Data with open(\"testFun.dat\") as textFile: y = [line.split()", "200 points x2 clusters.') plt.show() # Set up the loop and plot fig1,", "numpy as np import matplotlib.pyplot as plt import skfuzzy as fuzz colors =", "len(y)): xpts[i] = X[i][0] # ypts = x[all rows][1] for i in range", "= plt.subplots(2, 1, figsize=(8, 8)) #number of figures alldata = np.vstack((xpts, ypts)) fpcs", "his test data instead !!!! # Then our data # Collect Test Data", "import numpy as np import matplotlib.pyplot as plt import skfuzzy as fuzz colors", "ax0 = plt.subplots() for label in range(2): # need 2 different kinds of", "Store fpc values for later fpcs.append(fpc) # Plot assigned clusters, for each data", "np.vstack((xpts, ypts)) fpcs = [] for ncenters, ax in enumerate(axes1.reshape(-1), 2): cntr, u,", "in range(ncenters): #number of clusters print(\"Cluster: \", j, \"\\n\", u0[j][i]) #membership for cluster", "'k', 'Brown', 'ForestGreen'] # Insert his test data instead !!!! 
# Then our", "need 2 different kinds of labels, only have 1 cuz theyre not labeled...", "#membership for cluster print() # Store fpc values for later fpcs.append(fpc) # Plot", "clusters, for each data point in training set cluster_membership = np.argmax(u, axis=0) for", "\", j, \"\\n\", u0[j][i]) #membership for cluster print() # Store fpc values for", "ypts)) fpcs = [] for ncenters, ax in enumerate(axes1.reshape(-1), 2): cntr, u, u0,", "(0, len(y)): xpts[i] = X[i][0] # ypts = x[all rows][1] for i in", "for j in range(ncenters): ax.plot(xpts[cluster_membership == j], ypts[cluster_membership == j], '.', color=colors[j]) #", "with open(\"testFun.dat\") as textFile: y = [line.split() for line in textFile] y =", "'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen'] # Insert his test data instead", "np.zeros(len(y)) # no labels # xpts = x[all rows][0] for i in range", "np.array(y) X = np.zeros(shape=(200,2)) # stores test data as number in array X", "is the array of the memberiship functions for i in range (len(y)): #", "only have 1 cuz theyre not labeled... 
ax0.plot(xpts[labels == label], ypts[labels == label],", "as np import matplotlib.pyplot as plt import skfuzzy as fuzz colors = ['b',", "num rows for j in range(0,len(y[0])): # num columns X[i,j] = float(y[i,j]) xpts", "fpc values for later fpcs.append(fpc) # Plot assigned clusters, for each data point", "# ypts = x[all rows][1] for i in range (0, len(y)): ypts[i] =", "len(y)): ypts[i] = X[i][1] # Visualize the test data fig0, ax0 = plt.subplots()", "print(\"Cluster: \", j, \"\\n\", u0[j][i]) #membership for cluster print() # Store fpc values", "of figures alldata = np.vstack((xpts, ypts)) fpcs = [] for ncenters, ax in", "'.', color=colors[j]) # Mark the center of each fuzzy cluster for pt in", "loop and plot fig1, axes1 = plt.subplots(2, 1, figsize=(8, 8)) #number of figures", "in range(2): # need 2 different kinds of labels, only have 1 cuz", "label], '.', color=colors[label]) ax0.set_title('Test data: 200 points x2 clusters.') plt.show() # Set up", "Then our data # Collect Test Data with open(\"testFun.dat\") as textFile: y =", "in cntr: ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers = {0}; FPC = {1:.2f}'.format(ncenters, fpc)) ax.axis('off')", "j in range(ncenters): #number of clusters print(\"Cluster: \", j, \"\\n\", u0[j][i]) #membership for", "(0, len(y)): ypts[i] = X[i][1] # Visualize the test data fig0, ax0 =", "plt.subplots() for label in range(2): # need 2 different kinds of labels, only", "as textFile: y = [line.split() for line in textFile] y = np.array(y) X", "range (0, len(y)): xpts[i] = X[i][0] # ypts = x[all rows][1] for i", "#fuzzytest.py #<NAME> #<NAME> #fuzzy clustering for testFun.dat from __future__ import division, print_function import", "in textFile] y = np.array(y) X = np.zeros(shape=(200,2)) # stores test data as", "as number in array X (converts from strings) for i in range(0,len(y)): #", "# Plot assigned clusters, for each data point in training set cluster_membership =", "figsize=(8, 8)) #number of figures alldata = np.vstack((xpts, 
ypts)) fpcs = [] for", "not labeled... ax0.plot(xpts[labels == label], ypts[labels == label], '.', color=colors[label]) ax0.set_title('Test data: 200", "j], '.', color=colors[j]) # Mark the center of each fuzzy cluster for pt", "center of each fuzzy cluster for pt in cntr: ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers", "= ['b', 'orange', 'g', 'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen'] # Insert", "np import matplotlib.pyplot as plt import skfuzzy as fuzz colors = ['b', 'orange',", "range(ncenters): ax.plot(xpts[cluster_membership == j], ypts[cluster_membership == j], '.', color=colors[j]) # Mark the center", "X[i][0] # ypts = x[all rows][1] for i in range (0, len(y)): ypts[i]", "'g', 'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen'] # Insert his test data", "values for later fpcs.append(fpc) # Plot assigned clusters, for each data point in", "for ncenters, ax in enumerate(axes1.reshape(-1), 2): cntr, u, u0, d, jm, p, fpc", "= np.zeros(len(y)) ypts = np.zeros(len(y)) labels = np.zeros(len(y)) # no labels # xpts", "str(ncenters), \"\\n\") # u0 is the array of the memberiship functions for i", "# columns print (\"Data point: \",xpts[i], \",\", ypts[i]) #data point print(\"Membership: \") for", "clustering for testFun.dat from __future__ import division, print_function import numpy as np import", "== j], ypts[cluster_membership == j], '.', color=colors[j]) # Mark the center of each", "cluster print() # Store fpc values for later fpcs.append(fpc) # Plot assigned clusters,", "(converts from strings) for i in range(0,len(y)): # num rows for j in", "in enumerate(axes1.reshape(-1), 2): cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans( alldata,", "print(\"Membership: \") for j in range(ncenters): #number of clusters print(\"Cluster: \", j, \"\\n\",", "ax0.set_title('Test data: 200 points x2 clusters.') plt.show() # Set up the loop and", "axes1 = plt.subplots(2, 1, figsize=(8, 8)) #number of figures alldata = np.vstack((xpts, ypts))", "in range(0,len(y[0])): # num 
columns X[i,j] = float(y[i,j]) xpts = np.zeros(len(y)) ypts =", "testFun.dat from __future__ import division, print_function import numpy as np import matplotlib.pyplot as", "print_function import numpy as np import matplotlib.pyplot as plt import skfuzzy as fuzz", "# u0 is the array of the memberiship functions for i in range", "i in range (0, len(y)): xpts[i] = X[i][0] # ypts = x[all rows][1]", "ax.plot(xpts[cluster_membership == j], ypts[cluster_membership == j], '.', color=colors[j]) # Mark the center of", "from __future__ import division, print_function import numpy as np import matplotlib.pyplot as plt", "ax in enumerate(axes1.reshape(-1), 2): cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(", "Test Data with open(\"testFun.dat\") as textFile: y = [line.split() for line in textFile]", "init=None) print(\"Centers = \", str(ncenters), \"\\n\") # u0 is the array of the", "for each data point in training set cluster_membership = np.argmax(u, axis=0) for j", "point: \",xpts[i], \",\", ypts[i]) #data point print(\"Membership: \") for j in range(ncenters): #number", "of labels, only have 1 cuz theyre not labeled... 
ax0.plot(xpts[labels == label], ypts[labels", "__future__ import division, print_function import numpy as np import matplotlib.pyplot as plt import", "ypts[labels == label], '.', color=colors[label]) ax0.set_title('Test data: 200 points x2 clusters.') plt.show() #", "u0, d, jm, p, fpc = fuzz.cluster.cmeans( alldata, ncenters, 2, error=0.005, maxiter=1000, init=None)", "for label in range(2): # need 2 different kinds of labels, only have", "range (0, len(y)): ypts[i] = X[i][1] # Visualize the test data fig0, ax0", "np.zeros(len(y)) labels = np.zeros(len(y)) # no labels # xpts = x[all rows][0] for", "set cluster_membership = np.argmax(u, axis=0) for j in range(ncenters): ax.plot(xpts[cluster_membership == j], ypts[cluster_membership", "1, figsize=(8, 8)) #number of figures alldata = np.vstack((xpts, ypts)) fpcs = []", "ypts = np.zeros(len(y)) labels = np.zeros(len(y)) # no labels # xpts = x[all", "for i in range(0,len(y)): # num rows for j in range(0,len(y[0])): # num", "in range(ncenters): ax.plot(xpts[cluster_membership == j], ypts[cluster_membership == j], '.', color=colors[j]) # Mark the", "u, u0, d, jm, p, fpc = fuzz.cluster.cmeans( alldata, ncenters, 2, error=0.005, maxiter=1000,", "labels = np.zeros(len(y)) # no labels # xpts = x[all rows][0] for i", "for line in textFile] y = np.array(y) X = np.zeros(shape=(200,2)) # stores test", "== j], '.', color=colors[j]) # Mark the center of each fuzzy cluster for", "cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans( alldata, ncenters, 2, error=0.005,", "= x[all rows][1] for i in range (0, len(y)): ypts[i] = X[i][1] #", "for later fpcs.append(fpc) # Plot assigned clusters, for each data point in training", "label], ypts[labels == label], '.', color=colors[label]) ax0.set_title('Test data: 200 points x2 clusters.') plt.show()", "i in range(0,len(y)): # num rows for j in range(0,len(y[0])): # num columns", "range(2): # need 2 different kinds of labels, only have 1 cuz theyre", "range(0,len(y)): # num rows for j in 
range(0,len(y[0])): # num columns X[i,j] =", "cluster for pt in cntr: ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers = {0}; FPC =", "for i in range (0, len(y)): xpts[i] = X[i][0] # ypts = x[all", "Plot assigned clusters, for each data point in training set cluster_membership = np.argmax(u,", "print (\"Data point: \",xpts[i], \",\", ypts[i]) #data point print(\"Membership: \") for j in", "#data point print(\"Membership: \") for j in range(ncenters): #number of clusters print(\"Cluster: \",", "# Insert his test data instead !!!! # Then our data # Collect", "from strings) for i in range(0,len(y)): # num rows for j in range(0,len(y[0])):", "#fuzzy clustering for testFun.dat from __future__ import division, print_function import numpy as np", "color=colors[j]) # Mark the center of each fuzzy cluster for pt in cntr:", "in training set cluster_membership = np.argmax(u, axis=0) for j in range(ncenters): ax.plot(xpts[cluster_membership ==", "\",\", ypts[i]) #data point print(\"Membership: \") for j in range(ncenters): #number of clusters", "[] for ncenters, ax in enumerate(axes1.reshape(-1), 2): cntr, u, u0, d, jm, p,", "# num rows for j in range(0,len(y[0])): # num columns X[i,j] = float(y[i,j])", "# num columns X[i,j] = float(y[i,j]) xpts = np.zeros(len(y)) ypts = np.zeros(len(y)) labels", "array X (converts from strings) for i in range(0,len(y)): # num rows for", "memberiship functions for i in range (len(y)): # columns print (\"Data point: \",xpts[i],", "Data with open(\"testFun.dat\") as textFile: y = [line.split() for line in textFile] y", "xpts = x[all rows][0] for i in range (0, len(y)): xpts[i] = X[i][0]", "['b', 'orange', 'g', 'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen'] # Insert his", "for j in range(0,len(y[0])): # num columns X[i,j] = float(y[i,j]) xpts = np.zeros(len(y))", "labels # xpts = x[all rows][0] for i in range (0, len(y)): xpts[i]", "= \", str(ncenters), \"\\n\") # u0 is the array of the memberiship functions", "X[i][1] # Visualize the test 
data fig0, ax0 = plt.subplots() for label in", "xpts[i] = X[i][0] # ypts = x[all rows][1] for i in range (0,", "2, error=0.005, maxiter=1000, init=None) print(\"Centers = \", str(ncenters), \"\\n\") # u0 is the", "test data instead !!!! # Then our data # Collect Test Data with", "point print(\"Membership: \") for j in range(ncenters): #number of clusters print(\"Cluster: \", j,", "for testFun.dat from __future__ import division, print_function import numpy as np import matplotlib.pyplot", "= np.array(y) X = np.zeros(shape=(200,2)) # stores test data as number in array", "1 cuz theyre not labeled... ax0.plot(xpts[labels == label], ypts[labels == label], '.', color=colors[label])", "plt.subplots(2, 1, figsize=(8, 8)) #number of figures alldata = np.vstack((xpts, ypts)) fpcs =", "= float(y[i,j]) xpts = np.zeros(len(y)) ypts = np.zeros(len(y)) labels = np.zeros(len(y)) # no", "\") for j in range(ncenters): #number of clusters print(\"Cluster: \", j, \"\\n\", u0[j][i])", "the memberiship functions for i in range (len(y)): # columns print (\"Data point:", "no labels # xpts = x[all rows][0] for i in range (0, len(y)):", "as plt import skfuzzy as fuzz colors = ['b', 'orange', 'g', 'r', 'c',", "j, \"\\n\", u0[j][i]) #membership for cluster print() # Store fpc values for later", "= np.argmax(u, axis=0) for j in range(ncenters): ax.plot(xpts[cluster_membership == j], ypts[cluster_membership == j],", "== label], '.', color=colors[label]) ax0.set_title('Test data: 200 points x2 clusters.') plt.show() # Set", "in range(0,len(y)): # num rows for j in range(0,len(y[0])): # num columns X[i,j]", "ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers = {0}; FPC = {1:.2f}'.format(ncenters, fpc)) ax.axis('off') fig1.tight_layout() plt.show()", "u0[j][i]) #membership for cluster print() # Store fpc values for later fpcs.append(fpc) #", "range (len(y)): # columns print (\"Data point: \",xpts[i], \",\", ypts[i]) #data point print(\"Membership:", "for cluster print() # Store fpc values for 
later fpcs.append(fpc) # Plot assigned", "data as number in array X (converts from strings) for i in range(0,len(y)):", "number in array X (converts from strings) for i in range(0,len(y)): # num", "division, print_function import numpy as np import matplotlib.pyplot as plt import skfuzzy as", "skfuzzy as fuzz colors = ['b', 'orange', 'g', 'r', 'c', 'm', 'y', 'k',", "import matplotlib.pyplot as plt import skfuzzy as fuzz colors = ['b', 'orange', 'g',", "j in range(0,len(y[0])): # num columns X[i,j] = float(y[i,j]) xpts = np.zeros(len(y)) ypts", "# Mark the center of each fuzzy cluster for pt in cntr: ax.plot(pt[0],", "in array X (converts from strings) for i in range(0,len(y)): # num rows", "of clusters print(\"Cluster: \", j, \"\\n\", u0[j][i]) #membership for cluster print() # Store", "'y', 'k', 'Brown', 'ForestGreen'] # Insert his test data instead !!!! # Then", "X = np.zeros(shape=(200,2)) # stores test data as number in array X (converts", "'m', 'y', 'k', 'Brown', 'ForestGreen'] # Insert his test data instead !!!! #", "Visualize the test data fig0, ax0 = plt.subplots() for label in range(2): #", "matplotlib.pyplot as plt import skfuzzy as fuzz colors = ['b', 'orange', 'g', 'r',", "ypts[i] = X[i][1] # Visualize the test data fig0, ax0 = plt.subplots() for", "# Then our data # Collect Test Data with open(\"testFun.dat\") as textFile: y", "import division, print_function import numpy as np import matplotlib.pyplot as plt import skfuzzy", "# stores test data as number in array X (converts from strings) for", "columns print (\"Data point: \",xpts[i], \",\", ypts[i]) #data point print(\"Membership: \") for j", "the loop and plot fig1, axes1 = plt.subplots(2, 1, figsize=(8, 8)) #number of", "!!!! 
# Then our data # Collect Test Data with open(\"testFun.dat\") as textFile:", "#<NAME> #<NAME> #fuzzy clustering for testFun.dat from __future__ import division, print_function import numpy", "# no labels # xpts = x[all rows][0] for i in range (0,", "= np.vstack((xpts, ypts)) fpcs = [] for ncenters, ax in enumerate(axes1.reshape(-1), 2): cntr,", "plot fig1, axes1 = plt.subplots(2, 1, figsize=(8, 8)) #number of figures alldata =", "np.argmax(u, axis=0) for j in range(ncenters): ax.plot(xpts[cluster_membership == j], ypts[cluster_membership == j], '.',", "labels, only have 1 cuz theyre not labeled... ax0.plot(xpts[labels == label], ypts[labels ==", "fpc = fuzz.cluster.cmeans( alldata, ncenters, 2, error=0.005, maxiter=1000, init=None) print(\"Centers = \", str(ncenters),", "= plt.subplots() for label in range(2): # need 2 different kinds of labels,", "fig1, axes1 = plt.subplots(2, 1, figsize=(8, 8)) #number of figures alldata = np.vstack((xpts,", "range(0,len(y[0])): # num columns X[i,j] = float(y[i,j]) xpts = np.zeros(len(y)) ypts = np.zeros(len(y))", "columns X[i,j] = float(y[i,j]) xpts = np.zeros(len(y)) ypts = np.zeros(len(y)) labels = np.zeros(len(y))", "u0 is the array of the memberiship functions for i in range (len(y)):", "= X[i][1] # Visualize the test data fig0, ax0 = plt.subplots() for label", "stores test data as number in array X (converts from strings) for i", "functions for i in range (len(y)): # columns print (\"Data point: \",xpts[i], \",\",", "label in range(2): # need 2 different kinds of labels, only have 1", "# xpts = x[all rows][0] for i in range (0, len(y)): xpts[i] =", "ncenters, ax in enumerate(axes1.reshape(-1), 2): cntr, u, u0, d, jm, p, fpc =", "import skfuzzy as fuzz colors = ['b', 'orange', 'g', 'r', 'c', 'm', 'y',", "of the memberiship functions for i in range (len(y)): # columns print (\"Data", "clusters print(\"Cluster: \", j, \"\\n\", u0[j][i]) #membership for cluster print() # Store fpc", "fuzzy cluster for pt in cntr: 
ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers = {0}; FPC", "np.zeros(shape=(200,2)) # stores test data as number in array X (converts from strings)", "as fuzz colors = ['b', 'orange', 'g', 'r', 'c', 'm', 'y', 'k', 'Brown',", "line in textFile] y = np.array(y) X = np.zeros(shape=(200,2)) # stores test data", "color=colors[label]) ax0.set_title('Test data: 200 points x2 clusters.') plt.show() # Set up the loop", "clusters.') plt.show() # Set up the loop and plot fig1, axes1 = plt.subplots(2,", "of each fuzzy cluster for pt in cntr: ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers =", "data: 200 points x2 clusters.') plt.show() # Set up the loop and plot", "(len(y)): # columns print (\"Data point: \",xpts[i], \",\", ypts[i]) #data point print(\"Membership: \")", "ncenters, 2, error=0.005, maxiter=1000, init=None) print(\"Centers = \", str(ncenters), \"\\n\") # u0 is", "np.zeros(len(y)) ypts = np.zeros(len(y)) labels = np.zeros(len(y)) # no labels # xpts =", "X (converts from strings) for i in range(0,len(y)): # num rows for j", "cntr: ax.plot(pt[0], pt[1], 'rs') ax.set_title('Centers = {0}; FPC = {1:.2f}'.format(ncenters, fpc)) ax.axis('off') fig1.tight_layout()", "rows][1] for i in range (0, len(y)): ypts[i] = X[i][1] # Visualize the", "= [line.split() for line in textFile] y = np.array(y) X = np.zeros(shape=(200,2)) #", "in range (0, len(y)): xpts[i] = X[i][0] # ypts = x[all rows][1] for", "X[i,j] = float(y[i,j]) xpts = np.zeros(len(y)) ypts = np.zeros(len(y)) labels = np.zeros(len(y)) #", "#<NAME> #fuzzy clustering for testFun.dat from __future__ import division, print_function import numpy as", "later fpcs.append(fpc) # Plot assigned clusters, for each data point in training set", "open(\"testFun.dat\") as textFile: y = [line.split() for line in textFile] y = np.array(y)", "'Brown', 'ForestGreen'] # Insert his test data instead !!!! 
# Then our data", "error=0.005, maxiter=1000, init=None) print(\"Centers = \", str(ncenters), \"\\n\") # u0 is the array", "ypts[cluster_membership == j], '.', color=colors[j]) # Mark the center of each fuzzy cluster", "cluster_membership = np.argmax(u, axis=0) for j in range(ncenters): ax.plot(xpts[cluster_membership == j], ypts[cluster_membership ==", "2): cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans( alldata, ncenters, 2,", "have 1 cuz theyre not labeled... ax0.plot(xpts[labels == label], ypts[labels == label], '.',", "in range (0, len(y)): ypts[i] = X[i][1] # Visualize the test data fig0,", "y = [line.split() for line in textFile] y = np.array(y) X = np.zeros(shape=(200,2))" ]
[ "(str(request.url_rule).split(\"/\")[-2] == \"likes\") if likes: qresult = Likes.query.filter_by(user=uid, media=mid) else: qresult = Dislikes.query.filter_by(user=uid,", "True app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate using os db = SQLAlchemy(app) djv =", "\"\"\" self.signature = signature class IndexedMedia(db.Model): \"\"\" Map existing songs table to a", "recognizeMedia.AsyncResult(sid) if recognizer.ready(): if recognizer.successful(): result = recognizer.get() return jsonify(result['data']), result['code'] if recognizer.failed():", "= duration self.author = author self.mtype = mtype self.sid = sid class Likes(db.Model):", "of user likes for a media. \"\"\" try: if Media.query.get(mid) == None: abort(404,", "primary_key=True, autoincrement=True) signature = db.Column(db.String(255), primary_key=True) def __init__(self, signature): \"\"\" Initialize class. \"\"\"", "#circumvent int primary key req user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds = db.Column(db.JSON,", "@clry.task(bind=True) def recognizeMedia(self, filepath): #TODO: Use sth better than filenames result = {}", "\"\"\" return \"Hello \" + get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self, media): \"\"\" Fingerprint and", "jwt = JWTManager(app) clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create upload folders", "None: abort(400, \"Json data not provided.\") json_data = request.get_json() try: data = media_schema.load(json_data)", "testInstall(): \"\"\" Test installation. 
\"\"\" return \"Hello \" + get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self,", "asynctask = fingerprintMedia.delay(data) #TODO: Ensure celery always recieves task b4 returning return jsonify({\"uuid\":", "def process_json(self, data): \"\"\" Convert json string to array before passing it to", "table = db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine) __table__ = table id = table.c.song_id name", "= fields.Int(required=True, dump_only=True) name = fields.Str(required=True) author = fields.Str(required=True) duration = fields.Int(default=0, missing=0)", "pydub import AudioSegment from dejavu import Dejavu from dejavu.recognize import FileRecognizer from flask", "request, abort, jsonify from werkzeug.utils import secure_filename from celery import Celery, states from", "name = db.Column(db.String(255), nullable=False) duration = db.Column(db.BigInteger, nullable=False) author = db.Column(db.String(255), nullable=False) mtype", "open(configpath) as f: config = json.load(f) except IOError as err: print(\"Cannot open configuration:", "\"media\": mid, \"seconds\": data[\"seconds\"]}), 201 elif request.method == 'PUT': if not existingRatings: abort(404,", "nullable=False) sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self, name, duration, author, mtype, sid): \"\"\"", "None if url != None: #fingerprint try: yt = YouTube(url) except Exception as", "resource id, name, author and time index of a sampled media. \"\"\" #TODO:", "ValidationError('Please provide another signature.') def mediaTypeValidator(data): \"\"\" Validate media type. 
\"\"\" if data", "user self.media = media self.seconds = seconds db.create_all() # marshmallow schemas def userSignatureValidator(data):", "file after use except Exception as err: return {\"data\":{\"msg\":\"Unable to index media.\"}, \"code\":", "= json.loads(data.seconds) return data user_schema = UserSchema() media_schema = MediaSchema() media_list_schema = MediaSchema(many=True)", "media.\"}, \"code\": 500} if sid <= 0: return {\"data\":{\"msg\":\"Media already exists.\"}, \"code\": 409}", "in request.files: abort(400, \"No file.\") file = request.files['file'] if file.filename == '': abort(400,", "= db.Column(db.JSON, nullable=False) def __init__(self, user, media, seconds): \"\"\" Initialize class. \"\"\" self.user", "import os import json from os import path as pth from pytube import", "jsonify(user_likes_schema.dump(existingRatings)) elif request.method == 'DELETE': if not existingRatings: abort(404, \"Ratings not found.\") qresult.delete()", "marshmallow import Schema, fields, ValidationError, pre_dump from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, JWTManager", "def fingerprintStatusApi(sid): \"\"\" Retrieve the status of a fingerprinting task. \"\"\" fingerprinter =", "secure_filename from celery import Celery, states from celery.exceptions import Ignore from flask_sqlalchemy import", "models class Users(db.Model): \"\"\" Users model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) signature", "indexed = fields.Method('check_indexed', dump_only=True) def check_indexed(self, media): \"\"\" Return Boolean indicator if media", "import AudioSegment from dejavu import Dejavu from dejavu.recognize import FileRecognizer from flask import", "media. \"\"\" url = media.get(\"url\", None) sid = None if url != None:", "class Likes(db.Model): \"\"\" Likes model. 
\"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int", "if likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating: jsonify([])", "as err: return jsonify(err.messages), 400 asynctask = fingerprintMedia.delay(data) #TODO: Ensure celery always recieves", "# rmv file after use except Exception as err: return {\"data\":{\"msg\":\"Unable to index", "raise Exception except Exception as e: abort(404, \"Media not found.\") likes = (str(request.url_rule).split(\"/\")[-2]", "= None for i in xrange(0, len(stream_list)): if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream =", "== 'POST': if existingRatings: abort(409, \"User ratings exists for media.\") else: #create if", "media self.seconds = seconds db.create_all() # marshmallow schemas def userSignatureValidator(data): \"\"\" Validate user", "media_list_schema = MediaSchema(many=True) user_likes_schema = LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True) def allowed_file(filename): return '.'", "media.duration, \"match_time\": song['match_time'] } except Exception as e: return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500}", "= None if url != None: #fingerprint try: yt = YouTube(url) except Exception", "that don't exist if fingerprinter.ready(): if fingerprinter.successful(): result = fingerprinter.get() return jsonify(result['data']), result['code']", "not found.\") likes = (str(request.url_rule).split(\"/\")[-1] == \"likes\") if likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else:", "unavailable.\"}, \"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid", "return jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def 
fingerprintStatusApi(sid): \"\"\" Retrieve the status", "from celery import Celery, states from celery.exceptions import Ignore from flask_sqlalchemy import SQLAlchemy", "except ValidationError as err: return jsonify(err.messages), 400 user = Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user)", "Validate user signature. \"\"\" user = Users.query.filter_by(signature=data).first() if user != None: raise ValidationError('Please", "song: return {\"data\":{\"msg\":\"Media not found.\"}, \"code\": 404} return {\"data\":result, \"code\": 200} @app.route('/hello', methods=['GET'])", "config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO:", "in xrange(0, len(stream_list)): if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream = stream_list[i] break; if stream", "author and time index of a sampled media. \"\"\" #TODO: Improve recognition if", "media.\") else: #create if likes: newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings =", "\"\"\" Convert json string to array before passing it to dump(). \"\"\" data.seconds", "from marshmallow import Schema, fields, ValidationError, pre_dump from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required,", "config = json.load(f) except IOError as err: print(\"Cannot open configuration: %s. Exiting\" %", "Likes & dislikes serialization/deserialization schema. 
\"\"\" #Discard seconds out of timer window user", "data = user_likes_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 if request.method ==", "+ get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self, media): \"\"\" Fingerprint and add a given media.", "500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath)", "Fingerprint and add a given media. \"\"\" url = media.get(\"url\", None) sid =", "200} @app.route('/hello', methods=['GET']) @jwt_required def helloApi(): \"\"\" Installation test. \"\"\" asynctask = testInstall.apply()", "db.session.commit() return jsonify({\"success\": True}) else: if not request.is_json or request.get_json() == None: abort(400,", "media mid. \"\"\" media = Media.query.get(mid) if not media: abort(404, \"Media not found.\")", "if not existingRatings: return jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif request.method == 'DELETE': if not", "Return Boolean indicator if media is indexed. \"\"\" return not media.sid == None", "existingRatings: abort(409, \"User ratings exists for media.\") else: #create if likes: newRatings =", "\"\"\" Likes model. \"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key", "if not media: abort(404, \"Media not found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required def", "upload folders on app load if (pth.isdir(TMP_UPLOAD_FOLDER) == False): print \"Creating upload folder\"", "= sid class Likes(db.Model): \"\"\" Likes model. 
\"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True)", "Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create upload folders on app load if (pth.isdir(TMP_UPLOAD_FOLDER)", "b4 returning return jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def fingerprintStatusApi(sid): \"\"\" Retrieve", "\"\"\" asynctask = testInstall.apply() if asynctask.ready() and asynctask.successful(): return jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\",", "= user_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 user = Users(signature=data['signature']) db.session.add(user)", "sid class Likes(db.Model): \"\"\" Likes model. \"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent", "sid = djv.fingerprint_file(filepath) #os.remove(filepath) # rmv file after use except Exception as err:", "asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def fingerprintStatusApi(sid): \"\"\" Retrieve the status of a", "str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def mediaLikesApi(mid): \"\"\" Retrieve list of", "'PUT', 'DELETE']) @jwt_required def userLikesApi(mid, uid): \"\"\" Retrieve, add & modify the user", "ValidationError('Seconds cannot be empty.') class UserSchema(Schema): \"\"\" User serialization/deserialization schema. \"\"\" signature =", "Users(db.Model): \"\"\" Users model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) signature = db.Column(db.String(255),", "user, media, seconds): \"\"\" Initialize class. 
\"\"\" self.user = user self.media = media", "jsonify({\"uuid\": asynctask.task_id}), 202 abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def recognitionStatusApi(sid): \"\"\" Retieve", "of timer window user = fields.Int(required=True, dump_only=True) media = fields.Int(required=True, dump_only=True) seconds =", "pth from pytube import YouTube from pydub import AudioSegment from dejavu import Dejavu", "if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath) #os.remove(filepath)", "class MediaSchema(Schema): \"\"\" Media serialization/deserialization schema. \"\"\" id = fields.Int(required=True, dump_only=True) name =", "== DOWNLOAD_AUDIO_FORMAT: stream = stream_list[i] break; if stream == None: return {\"data\":{\"msg\":\"Media stream", "e: return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500} if not song: return {\"data\":{\"msg\":\"Media not found.\"},", "list of user likes for a media. \"\"\" try: if Media.query.get(mid) == None:", "validate=userSignatureValidator) class MediaSchema(Schema): \"\"\" Media serialization/deserialization schema. 
\"\"\" id = fields.Int(required=True, dump_only=True) name", "user_likes_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 if request.method == 'POST': if", "req user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds = db.Column(db.JSON, nullable=False) def __init__(self, user,", "= db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine) __table__ = table id = table.c.song_id name =", "with open(configpath) as f: config = json.load(f) except IOError as err: print(\"Cannot open", "or user.id != uid: raise Exception except Exception as e: abort(401) try: if", "json from os import path as pth from pytube import YouTube from pydub", "\"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(255), nullable=False) duration = db.Column(db.BigInteger,", "def mediaTypeValidator(data): \"\"\" Validate media type. \"\"\" if data and data.lower() not in", "recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}), 202 abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def recognitionStatusApi(sid):", "return {\"data\": media_schema.dump(row), \"code\": 201} @clry.task(bind=True) def recognizeMedia(self, filepath): #TODO: Use sth better", "from sqlalchemy.dialects.mysql import MEDIUMINT from marshmallow import Schema, fields, ValidationError, pre_dump from flask_jwt_extended", "table.c.song_id name = table.c.song_name class Media(db.Model): \"\"\" Media model. 
\"\"\" id = db.Column(db.Integer,", "\"\"\" #Discard seconds out of timer window user = fields.Int(required=True, dump_only=True) media =", "abort(500, \"Error recognizing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required", "Generate using os db = SQLAlchemy(app) djv = Dejavu(config) jwt = JWTManager(app) clry", "indexed. \"\"\" return not media.sid == None class LikesDislikesSchema(Schema): \"\"\" Likes & dislikes", "db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}), 201 elif request.method == 'PUT':", "a sampled media. \"\"\" #TODO: Improve recognition if 'file' not in request.files: abort(400,", "\"\"\" self.name = name self.duration = duration self.author = author self.mtype = mtype", "= Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return", "\"code\": 409} row = Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row) return", "\"\"\" media = Media.query.get(mid) if not media: abort(404, \"Media not found.\") return jsonify(media_schema.dump(media))", "'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate using os", "media, seconds): \"\"\" Initialize class. \"\"\" self.user = user self.media = media self.seconds", "= request.get_json() try: data = user_likes_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400", "another signature.') def mediaTypeValidator(data): \"\"\" Validate media type. 
\"\"\" if data and data.lower()", "\"\"\" Retrieve the resource id, name, author and time index of a sampled", "primary_key=True, autoincrement=True) name = db.Column(db.String(255), nullable=False) duration = db.Column(db.BigInteger, nullable=False) author = db.Column(db.String(255),", "app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate using os db = SQLAlchemy(app) djv = Dejavu(config)", "stream_list[i] break; if stream == None: return {\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\": 500} if", "@jwt_required def userLikesApi(mid, uid): \"\"\" Retrieve, add & modify the user likes for", "selected file\") if file and allowed_file(file.filename): filename = secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'], filename)", "author = db.Column(db.String(255), nullable=False) mtype = db.Column(db.String(255), nullable=False) sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def", "os import json from os import path as pth from pytube import YouTube", "db.session.add(user) db.session.commit() db.session.refresh(user) token = create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required", "409} row = Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row) return {\"data\":", "(str(request.url_rule).split(\"/\")[-1] == \"likes\") if likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if", "def mediaLikesApi(mid): \"\"\" Retrieve list of user likes for a media. \"\"\" try:", "LikesDislikesSchema(many=True) def allowed_file(filename): return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS #TODO:", "stream == None: return {\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False):", "Dejavu from dejavu.recognize import FileRecognizer from flask import Flask, request, abort, jsonify from", "asynctask = testInstall.apply() if asynctask.ready() and asynctask.successful(): return jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\", 500)", "app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI']", "set(['mp3', 'webm', '3gp', 'ogg']) MEDIA_TYPES = ['television', 'movie', 'music'] def init_config(configpath): \"\"\" Load", "def helloApi(): \"\"\" Installation test. \"\"\" asynctask = testInstall.apply() if asynctask.ready() and asynctask.successful():", "\"\"\" Initialize class. \"\"\" self.name = name self.duration = duration self.author = author", "if url != None: #fingerprint try: yt = YouTube(url) except Exception as err:", "os import path as pth from pytube import YouTube from pydub import AudioSegment", "import Ignore from flask_sqlalchemy import SQLAlchemy from sqlalchemy.dialects.mysql import MEDIUMINT from marshmallow import", "db.Column(db.String(255), primary_key=True) def __init__(self, signature): \"\"\" Initialize class. \"\"\" self.signature = signature class", "Media.query.get(mid) == None: raise Exception except Exception as e: abort(404, \"Media not found.\")", "class IndexedMedia(db.Model): \"\"\" Map existing songs table to a db Model. \"\"\" table", "model. 
\"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(255), nullable=False) duration =", "not found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required def mediaRecognitionApi(): \"\"\" Retrieve the resource", "likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating: jsonify([]) return", "= media.get(\"url\", None) sid = None if url != None: #fingerprint try: yt", "for a media. \"\"\" try: if Media.query.get(mid) == None: abort(404, \"Media not found.\")", "filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS #TODO: Increase no. workers @clry.task def testInstall():", "recognitionStatusApi(sid): \"\"\" Retieve the status of a recognition activity. \"\"\" recognizer = recognizeMedia.AsyncResult(sid)", "duration, author, mtype, sid): \"\"\" Initialize class. \"\"\" self.name = name self.duration =", "not provided.\") json_data = request.get_json() try: data = user_likes_schema.load(json_data) except ValidationError as err:", "a given media. 
\"\"\" url = media.get(\"url\", None) sid = None if url", "filepath) media = Media.query.filter_by(sid=song['song_id']).first() if media: print song['song_id'] result = { \"id\": media.id,", "or request.get_json() == None: abort(400, \"Json data not provided.\") json_data = request.get_json() try:", "from werkzeug.utils import secure_filename from celery import Celery, states from celery.exceptions import Ignore", "len(stream_list)): if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream = stream_list[i] break; if stream == None:", "= stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath) #os.remove(filepath) # rmv file after use except Exception", "request.files: abort(400, \"No file.\") file = request.files['file'] if file.filename == '': abort(400, \"No", "as err: return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500} media['duration'] = int(yt.length) stream_list = yt.streams.filter(only_audio=True).all()", "name = fields.Str(required=True) author = fields.Str(required=True) duration = fields.Int(default=0, missing=0) mtype = fields.Str(required=True,", "{ \"id\": media.id, \"offset\": song['offset_seconds'], \"duration\": media.duration, \"match_time\": song['match_time'] } except Exception as", "after use except Exception as err: return {\"data\":{\"msg\":\"Unable to index media.\"}, \"code\": 500}", "duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row) return {\"data\": media_schema.dump(row), \"code\": 201} @clry.task(bind=True)", "int primary key req user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds = db.Column(db.JSON, nullable=False)", "index of a sampled media. 
\"\"\" #TODO: Improve recognition if 'file' not in", "\"Media not found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required def mediaRecognitionApi(): \"\"\" Retrieve the", "helloApi(): \"\"\" Installation test. \"\"\" asynctask = testInstall.apply() if asynctask.ready() and asynctask.successful(): return", "media.get(\"url\", None) sid = None if url != None: #fingerprint try: yt =", "400 if request.method == 'POST': if existingRatings: abort(409, \"User ratings exists for media.\")", "= request.get_json() try: data = media_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400", "url = media.get(\"url\", None) sid = None if url != None: #fingerprint try:", "asynctask.task_id}), 202 abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def recognitionStatusApi(sid): \"\"\" Retieve the", "provided.\") json_data = request.get_json() try: data = user_likes_schema.load(json_data) except ValidationError as err: return", "= Likes.query.filter_by(user=uid, media=mid) else: qresult = Dislikes.query.filter_by(user=uid, media=mid) existingRatings = qresult.first() if request.method", "from pydub import AudioSegment from dejavu import Dejavu from dejavu.recognize import FileRecognizer from", "0: return {\"data\":{\"msg\":\"Media already exists.\"}, \"code\": 409} row = Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'],", "user.id != uid: raise Exception except Exception as e: abort(401) try: if Media.query.get(mid)", "class. 
\"\"\" self.name = name self.duration = duration self.author = author self.mtype =", "request.files['file'] if file.filename == '': abort(400, \"No selected file\") if file and allowed_file(file.filename):", "#TODO: Use sth better than filenames result = {} try: song = djv.recognize(FileRecognizer,", "None class LikesDislikesSchema(Schema): \"\"\" Likes & dislikes serialization/deserialization schema. \"\"\" #Discard seconds out", "err: return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500} media['duration'] = int(yt.length) stream_list = yt.streams.filter(only_audio=True).all() stream", "Installation test. \"\"\" asynctask = testInstall.apply() if asynctask.ready() and asynctask.successful(): return jsonify({\"msg\": \"Success!\"})", "= recognizer.get() return jsonify(result['data']), result['code'] if recognizer.failed(): return abort(500, \"Error recognizing media.\") return", "SQLAlchemy models class Users(db.Model): \"\"\" Users model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True)", "& retrieve media. \"\"\" if request.method == 'GET': media_list = Media.query.order_by(Media.name).all() data =", "= fields.Url(load_only=True) indexed = fields.Method('check_indexed', dump_only=True) def check_indexed(self, media): \"\"\" Return Boolean indicator", "Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}), 201", "def recognitionStatusApi(sid): \"\"\" Retieve the status of a recognition activity. \"\"\" recognizer =", "import path as pth from pytube import YouTube from pydub import AudioSegment from", "@clry.task def testInstall(): \"\"\" Test installation. \"\"\" return \"Hello \" + get_jwt_identity() @clry.task(bind=True)", "Retrieve list of user likes for a media. 
\"\"\" try: if Media.query.get(mid) ==", "filename = secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask = recognizeMedia.delay(filepath) return jsonify({\"uuid\":", "class. \"\"\" self.user = user self.media = media self.seconds = seconds class Dislikes(db.Model):", "if Media.query.get(mid) == None: abort(404, \"Media not found.\") except Exception as e: abort(404,", "#TODO: Generate using os db = SQLAlchemy(app) djv = Dejavu(config) jwt = JWTManager(app)", "self.media = media self.seconds = seconds class Dislikes(db.Model): \"\"\" Dislikes model. \"\"\" media", "DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS = set(['mp3', 'webm', '3gp', 'ogg']) MEDIA_TYPES = ['television', 'movie',", "self.name = name self.duration = duration self.author = author self.mtype = mtype self.sid", "= db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds = db.Column(db.JSON, nullable=False) def __init__(self, user, media, seconds):", "self.sid = sid class Likes(db.Model): \"\"\" Likes model. \"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'),", "database. \"\"\" if not request.is_json or request.get_json() == None: abort(400, \"Json data not", "likes is not empty. \"\"\" if not data or len(data) == 0: raise", "__init__(self, user, media, seconds): \"\"\" Initialize class. \"\"\" self.user = user self.media =", "recognizer.ready(): if recognizer.successful(): result = recognizer.get() return jsonify(result['data']), result['code'] if recognizer.failed(): return abort(500,", "db Model. 
\"\"\" table = db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine) __table__ = table id", "dump_only=True) media = fields.Int(required=True, dump_only=True) seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump def process_json(self,", "= djv.fingerprint_file(filepath) #os.remove(filepath) # rmv file after use except Exception as err: return", "dump_only=True) def check_indexed(self, media): \"\"\" Return Boolean indicator if media is indexed. \"\"\"", "['television', 'movie', 'music'] def init_config(configpath): \"\"\" Load config from a JSON file \"\"\"", "== \"likes\") if likes: qresult = Likes.query.filter_by(user=uid, media=mid) else: qresult = Dislikes.query.filter_by(user=uid, media=mid)", "except Exception as e: return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500} if not song: return", "{} try: song = djv.recognize(FileRecognizer, filepath) media = Media.query.filter_by(sid=song['song_id']).first() if media: print song['song_id']", "err: print(\"Cannot open configuration: %s. 
Exiting\" % (str(err))) return config config = init_config(\"CONFIG.json\")", "Exception as e: abort(401) try: if Media.query.get(mid) == None: raise Exception except Exception", "500} media['duration'] = int(yt.length) stream_list = yt.streams.filter(only_audio=True).all() stream = None for i in", "else: if not request.is_json or request.get_json() == None: abort(400, \"Json data not provided.\")", "import Schema, fields, ValidationError, pre_dump from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER", "dejavu import Dejavu from dejavu.recognize import FileRecognizer from flask import Flask, request, abort,", "'POST', 'PUT', 'DELETE']) @jwt_required def userLikesApi(mid, uid): \"\"\" Retrieve, add & modify the", "'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS']", "json_data = request.get_json() try: data = media_schema.load(json_data) except ValidationError as err: return jsonify(err.messages),", "rmv file after use except Exception as err: return {\"data\":{\"msg\":\"Unable to index media.\"},", "db.ForeignKey('songs.song_id')) def __init__(self, name, duration, author, mtype, sid): \"\"\" Initialize class. 
\"\"\" self.name", "Ensure celery always recieves task b4 returning return jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET'])", "print \"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models class Users(db.Model): \"\"\" Users model.", "= db.Column(db.String(255), primary_key=True) def __init__(self, signature): \"\"\" Initialize class. \"\"\" self.signature = signature", "Exiting\" % (str(err))) return config config = init_config(\"CONFIG.json\") app = Flask(__name__) app.config['UPLOAD_FOLDER'] =", "mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row) return {\"data\": media_schema.dump(row), \"code\": 201} @clry.task(bind=True) def recognizeMedia(self,", "media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid): \"\"\" Retrieve the details", "fields.Int(required=True, dump_only=True) seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump def process_json(self, data): \"\"\" Convert", "fingerprintMedia.delay(data) #TODO: Ensure celery always recieves task b4 returning return jsonify({\"uuid\": asynctask.task_id}), 202", "fields.Str(required=True) author = fields.Str(required=True) duration = fields.Int(default=0, missing=0) mtype = fields.Str(required=True, validate=mediaTypeValidator) url", "db.session.commit() db.session.refresh(user) token = create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required def", "as err: return jsonify(err.messages), 400 if request.method == 'POST': if existingRatings: abort(409, \"User", "else: newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return jsonify({\"user\": uid, \"media\": mid,", "on app load if 
(pth.isdir(TMP_UPLOAD_FOLDER) == False): print \"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) #", "def __init__(self, signature): \"\"\" Initialize class. \"\"\" self.signature = signature class IndexedMedia(db.Model): \"\"\"", "sids that don't exist if fingerprinter.ready(): if fingerprinter.successful(): result = fingerprinter.get() return jsonify(result['data']),", "abort(404, \"Ratings not found.\") else: #modify existingRatings.seconds = json.dumps(data[\"seconds\"]) db.session.commit() return jsonify({\"user\": uid,", "\"likes\") if likes: qresult = Likes.query.filter_by(user=uid, media=mid) else: qresult = Dislikes.query.filter_by(user=uid, media=mid) existingRatings", "fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that don't exist if fingerprinter.ready(): if fingerprinter.successful():", "e: abort(404, \"Media not found.\") likes = (str(request.url_rule).split(\"/\")[-2] == \"likes\") if likes: qresult", "upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models class Users(db.Model): \"\"\" Users model. \"\"\" id", "not data or len(data) == 0: raise ValidationError('Seconds cannot be empty.') class UserSchema(Schema):", "media: abort(404, \"Media not found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required def mediaRecognitionApi(): \"\"\"", "\"User ratings exists for media.\") else: #create if likes: newRatings = Likes(user=uid, media=mid,", "\"\"\" Fingerprint and add a given media. \"\"\" url = media.get(\"url\", None) sid", "'webm', '3gp', 'ogg']) MEDIA_TYPES = ['television', 'movie', 'music'] def init_config(configpath): \"\"\" Load config", "folders on app load if (pth.isdir(TMP_UPLOAD_FOLDER) == False): print \"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER)", "create upload folders on app load if (pth.isdir(TMP_UPLOAD_FOLDER) == False): print \"Creating upload", "empty.') class UserSchema(Schema): \"\"\" User serialization/deserialization schema. 
\"\"\" signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator)", "@app.route('/hello', methods=['GET']) @jwt_required def helloApi(): \"\"\" Installation test. \"\"\" asynctask = testInstall.apply() if", "add & modify the user likes for a particular media. \"\"\" try: user", "try: user = Users.query.filter_by(signature=get_jwt_identity()).first() if user == None or user.id != uid: raise", "None or user.id != uid: raise Exception except Exception as e: abort(401) try:", "if not existingRatings: abort(404, \"Ratings not found.\") qresult.delete() db.session.commit() return jsonify({\"success\": True}) else:", "None: abort(400, \"Json data not provided.\") json_data = request.get_json() try: data = user_likes_schema.load(json_data)", "seconds): \"\"\" Initialize class. \"\"\" self.user = user self.media = media self.seconds =", "user signature. \"\"\" user = Users.query.filter_by(signature=data).first() if user != None: raise ValidationError('Please provide", "load if (pth.isdir(TMP_UPLOAD_FOLDER) == False): print \"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models", "\"\"\" if data and data.lower() not in MEDIA_TYPES: raise ValidationError('Mtype is invalid.') def", "Exception as e: abort(404, \"Ratings not found.\") likes = (str(request.url_rule).split(\"/\")[-1] == \"likes\") if", "'file' not in request.files: abort(400, \"No file.\") file = request.files['file'] if file.filename ==", "fields.Str(required=True, load_only=True, validate=userSignatureValidator) class MediaSchema(Schema): \"\"\" Media serialization/deserialization schema. \"\"\" id = fields.Int(required=True,", "= table.c.song_id name = table.c.song_name class Media(db.Model): \"\"\" Media model. 
\"\"\" id =", "= stream_list[i] break; if stream == None: return {\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\": 500}", "= Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating: jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>',", "return jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}), 201 elif request.method == 'PUT': if", "fields.Url(load_only=True) indexed = fields.Method('check_indexed', dump_only=True) def check_indexed(self, media): \"\"\" Return Boolean indicator if", "author, mtype, sid): \"\"\" Initialize class. \"\"\" self.name = name self.duration = duration", "= fingerprintMedia.delay(data) #TODO: Ensure celery always recieves task b4 returning return jsonify({\"uuid\": asynctask.task_id}),", "self.seconds = seconds class Dislikes(db.Model): \"\"\" Dislikes model. \"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'),", "song = djv.recognize(FileRecognizer, filepath) media = Media.query.filter_by(sid=song['song_id']).first() if media: print song['song_id'] result =", "result = recognizer.get() return jsonify(result['data']), result['code'] if recognizer.failed(): return abort(500, \"Error recognizing media.\")", "config = init_config(\"CONFIG.json\") app = Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd'])", "autoload=True, autoload_with=db.engine) __table__ = table id = table.c.song_id name = table.c.song_name class Media(db.Model):", "return \"Hello \" + get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self, media): \"\"\" Fingerprint and add", "Likes model. 
\"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req", "db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds = db.Column(db.JSON, nullable=False) def __init__(self, user, media, seconds): \"\"\"", "@app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def fingerprintStatusApi(sid): \"\"\" Retrieve the status of a fingerprinting task.", "user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds = db.Column(db.JSON, nullable=False) def __init__(self, user, media,", "= fields.Int(required=True, dump_only=True) media = fields.Int(required=True, dump_only=True) seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump", "return {\"data\":{\"msg\":\"Unable to index media.\"}, \"code\": 500} if sid <= 0: return {\"data\":{\"msg\":\"Media", "abort(404, \"Media not found.\") except Exception as e: abort(404, \"Ratings not found.\") likes", "methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @jwt_required def userLikesApi(mid, uid):", "id = db.Column(db.Integer, primary_key=True, autoincrement=True) signature = db.Column(db.String(255), primary_key=True) def __init__(self, signature): \"\"\"", "\"\"\" Validate media type. 
\"\"\" if data and data.lower() not in MEDIA_TYPES: raise", "fields.Int(required=True, dump_only=True) name = fields.Str(required=True) author = fields.Str(required=True) duration = fields.Int(default=0, missing=0) mtype", "@pre_dump def process_json(self, data): \"\"\" Convert json string to array before passing it", "= UserSchema() media_schema = MediaSchema() media_list_schema = MediaSchema(many=True) user_likes_schema = LikesDislikesSchema() media_likes_schema =", "found.\"}, \"code\": 404} return {\"data\":result, \"code\": 200} @app.route('/hello', methods=['GET']) @jwt_required def helloApi(): \"\"\"", "the details for the media mid. \"\"\" media = Media.query.get(mid) if not media:", "in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS #TODO: Increase no. workers @clry.task def", "Retieve the status of a recognition activity. \"\"\" recognizer = recognizeMedia.AsyncResult(sid) if recognizer.ready():", "500} if not song: return {\"data\":{\"msg\":\"Media not found.\"}, \"code\": 404} return {\"data\":result, \"code\":", "sqlalchemy.dialects.mysql import MEDIUMINT from marshmallow import Schema, fields, ValidationError, pre_dump from flask_jwt_extended import", "if file.filename == '': abort(400, \"No selected file\") if file and allowed_file(file.filename): filename", "self.author = author self.mtype = mtype self.sid = sid class Likes(db.Model): \"\"\" Likes", "Users.query.filter_by(signature=data).first() if user != None: raise ValidationError('Please provide another signature.') def mediaTypeValidator(data): \"\"\"", "not in MEDIA_TYPES: raise ValidationError('Mtype is invalid.') def emptyLikesValidator(data): \"\"\" Ensure likes is", "@app.route('/media/recognize', methods=['POST']) @jwt_required def mediaRecognitionApi(): \"\"\" Retrieve the resource id, name, author and", "True}) else: if not request.is_json or request.get_json() == None: abort(400, \"Json data not", "if request.method == 'GET': if not existingRatings: return 
jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif request.method", "= db.Column(db.BigInteger, nullable=False) author = db.Column(db.String(255), nullable=False) mtype = db.Column(db.String(255), nullable=False) sid =", "Exception as err: return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500} media['duration'] = int(yt.length) stream_list =", "\"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req user =", "sth better than filenames result = {} try: song = djv.recognize(FileRecognizer, filepath) media", "signature class IndexedMedia(db.Model): \"\"\" Map existing songs table to a db Model. \"\"\"", "if recognizer.ready(): if recognizer.successful(): result = recognizer.get() return jsonify(result['data']), result['code'] if recognizer.failed(): return", "to dump(). \"\"\" data.seconds = json.loads(data.seconds) return data user_schema = UserSchema() media_schema =", "is not empty. \"\"\" if not data or len(data) == 0: raise ValidationError('Seconds", "data not provided.\") json_data = request.get_json() try: data = media_schema.load(json_data) except ValidationError as", "media type. \"\"\" if data and data.lower() not in MEDIA_TYPES: raise ValidationError('Mtype is", "try: yt = YouTube(url) except Exception as err: return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500}", "signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator) class MediaSchema(Schema): \"\"\" Media serialization/deserialization schema. \"\"\" id", "Schema, fields, ValidationError, pre_dump from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER =", "and add a given media. 
\"\"\" url = media.get(\"url\", None) sid = None", "qresult = Dislikes.query.filter_by(user=uid, media=mid) existingRatings = qresult.first() if request.method == 'GET': if not", "dump_only=True) seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump def process_json(self, data): \"\"\" Convert json", "db.Column(db.JSON, nullable=False) def __init__(self, user, media, seconds): \"\"\" Initialize class. \"\"\" self.user =", "for the media mid. \"\"\" media = Media.query.get(mid) if not media: abort(404, \"Media", "class. \"\"\" self.signature = signature class IndexedMedia(db.Model): \"\"\" Map existing songs table to", "installation\", 500) @app.route('/register', methods=['POST']) def registerApi(): \"\"\" Add a user to the database.", "registerApi(): \"\"\" Add a user to the database. \"\"\" if not request.is_json or", "= signature class IndexedMedia(db.Model): \"\"\" Map existing songs table to a db Model.", "return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500} if not song: return {\"data\":{\"msg\":\"Media not found.\"}, \"code\":", "data = user_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 user = Users(signature=data['signature'])", "data user_schema = UserSchema() media_schema = MediaSchema() media_list_schema = MediaSchema(many=True) user_likes_schema = LikesDislikesSchema()", "FileRecognizer from flask import Flask, request, abort, jsonify from werkzeug.utils import secure_filename from", "'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @jwt_required def userLikesApi(mid, uid): \"\"\" Retrieve, add", "indicator if media is indexed. 
\"\"\" return not media.sid == None class LikesDislikesSchema(Schema):", "@app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @jwt_required def userLikesApi(mid,", "song['match_time'] } except Exception as e: return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500} if not", "db.session.refresh(row) return {\"data\": media_schema.dump(row), \"code\": 201} @clry.task(bind=True) def recognizeMedia(self, filepath): #TODO: Use sth", "from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/'", "and asynctask.successful(): return jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\", 500) @app.route('/register', methods=['POST']) def registerApi(): \"\"\"", "Retrieve the resource id, name, author and time index of a sampled media.", "likes = (str(request.url_rule).split(\"/\")[-1] == \"likes\") if likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating =", "Initialize class. \"\"\" self.user = user self.media = media self.seconds = seconds class", "passing it to dump(). \"\"\" data.seconds = json.loads(data.seconds) return data user_schema = UserSchema()", "Media(db.Model): \"\"\" Media model. 
\"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(255),", "== 'GET': media_list = Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list) return jsonify(data), 200 elif request.method", "db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(255), nullable=False) duration = db.Column(db.BigInteger, nullable=False) author =", "file\") if file and allowed_file(file.filename): filename = secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath)", "methods=['GET']) @jwt_required def fingerprintStatusApi(sid): \"\"\" Retrieve the status of a fingerprinting task. \"\"\"", "def allowed_file(filename): return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS #TODO: Increase", "the user likes for a particular media. \"\"\" try: user = Users.query.filter_by(signature=get_jwt_identity()).first() if", "a user to the database. \"\"\" if not request.is_json or request.get_json() == None:", "#TODO: Improve recognition if 'file' not in request.files: abort(400, \"No file.\") file =", "os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath) #os.remove(filepath) # rmv file after", "None: abort(404, \"Media not found.\") except Exception as e: abort(404, \"Ratings not found.\")", "seconds db.create_all() # marshmallow schemas def userSignatureValidator(data): \"\"\" Validate user signature. 
\"\"\" user", "JWTManager(app) clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create upload folders on app", "return abort(500, \"Error recognizing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET'])", "def __init__(self, name, duration, author, mtype, sid): \"\"\" Initialize class. \"\"\" self.name =", "= qresult.first() if request.method == 'GET': if not existingRatings: return jsonify({}) return jsonify(user_likes_schema.dump(existingRatings))", "user likes for a media. \"\"\" try: if Media.query.get(mid) == None: abort(404, \"Media", "request.method == 'DELETE': if not existingRatings: abort(404, \"Ratings not found.\") qresult.delete() db.session.commit() return", "json.loads(data.seconds) return data user_schema = UserSchema() media_schema = MediaSchema() media_list_schema = MediaSchema(many=True) user_likes_schema", "raise ValidationError('Seconds cannot be empty.') class UserSchema(Schema): \"\"\" User serialization/deserialization schema. 
\"\"\" signature", "@app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @jwt_required def userLikesApi(mid, uid): \"\"\" Retrieve, add &", "file and allowed_file(file.filename): filename = secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask =", "500} if sid <= 0: return {\"data\":{\"msg\":\"Media already exists.\"}, \"code\": 409} row =", "qresult.first() if request.method == 'GET': if not existingRatings: return jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif", "Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row) return {\"data\": media_schema.dump(row), \"code\": 201}", "\"Error recognizing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def", "import Flask, request, abort, jsonify from werkzeug.utils import secure_filename from celery import Celery,", "\"Json data not provided.\") json_data = request.get_json() try: data = user_schema.load(json_data) except ValidationError", "workers @clry.task def testInstall(): \"\"\" Test installation. \"\"\" return \"Hello \" + get_jwt_identity()", "methods=['GET','POST']) @jwt_required def mediaApi(): \"\"\" Add & retrieve media. \"\"\" if request.method ==", "likes: qresult = Likes.query.filter_by(user=uid, media=mid) else: qresult = Dislikes.query.filter_by(user=uid, media=mid) existingRatings = qresult.first()", "Initialize class. 
\"\"\" self.signature = signature class IndexedMedia(db.Model): \"\"\" Map existing songs table", "seconds out of timer window user = fields.Int(required=True, dump_only=True) media = fields.Int(required=True, dump_only=True)", "Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host'])", "modify the user likes for a particular media. \"\"\" try: user = Users.query.filter_by(signature=get_jwt_identity()).first()", "\"\"\" if not data or len(data) == 0: raise ValidationError('Seconds cannot be empty.')", "mtype = db.Column(db.String(255), nullable=False) sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self, name, duration, author,", "newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit()", "key req user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds = db.Column(db.JSON, nullable=False) def __init__(self,", "{\"data\":{\"msg\":\"Unable to index media.\"}, \"code\": 500} if sid <= 0: return {\"data\":{\"msg\":\"Media already", "invalid.') def emptyLikesValidator(data): \"\"\" Ensure likes is not empty. \"\"\" if not data", "fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that don't exist if fingerprinter.ready(): if fingerprinter.successful(): result =", "#modify existingRatings.seconds = json.dumps(data[\"seconds\"]) db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}), 200", "particular media. 
\"\"\" try: user = Users.query.filter_by(signature=get_jwt_identity()).first() if user == None or user.id", "Dislikes.query.filter_by(user=uid, media=mid) existingRatings = qresult.first() if request.method == 'GET': if not existingRatings: return", "= fields.Method('check_indexed', dump_only=True) def check_indexed(self, media): \"\"\" Return Boolean indicator if media is", "== 'PUT': if not existingRatings: abort(404, \"Ratings not found.\") else: #modify existingRatings.seconds =", "err: return jsonify(err.messages), 400 if request.method == 'POST': if existingRatings: abort(409, \"User ratings", "= fields.Str(required=True) author = fields.Str(required=True) duration = fields.Int(default=0, missing=0) mtype = fields.Str(required=True, validate=mediaTypeValidator)", "id = table.c.song_id name = table.c.song_name class Media(db.Model): \"\"\" Media model. \"\"\" id", "return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid): \"\"\" Retrieve the details for", "rating: jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT',", "return jsonify(result['data']), result['code'] if recognizer.failed(): return abort(500, \"Error recognizing media.\") return jsonify({\"uuid\": str(sid)}),", "Likes.query.filter_by(user=uid, media=mid) else: qresult = Dislikes.query.filter_by(user=uid, media=mid) existingRatings = qresult.first() if request.method ==", "from flask_sqlalchemy import SQLAlchemy from sqlalchemy.dialects.mysql import MEDIUMINT from marshmallow import Schema, fields,", "class Users(db.Model): \"\"\" Users model. 
\"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) signature =", "ValidationError, pre_dump from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER", "backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create upload folders on app load if (pth.isdir(TMP_UPLOAD_FOLDER) ==", "def mediaItemApi(mid): \"\"\" Retrieve the details for the media mid. \"\"\" media =", "likes = (str(request.url_rule).split(\"/\")[-2] == \"likes\") if likes: qresult = Likes.query.filter_by(user=uid, media=mid) else: qresult", "= table.c.song_name class Media(db.Model): \"\"\" Media model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True)", "as pth from pytube import YouTube from pydub import AudioSegment from dejavu import", "(pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath) #os.remove(filepath) #", "None for i in xrange(0, len(stream_list)): if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream = stream_list[i]", "Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user) token = create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST'])", "out of timer window user = fields.Int(required=True, dump_only=True) media = fields.Int(required=True, dump_only=True) seconds", "returning return jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def fingerprintStatusApi(sid): \"\"\" Retrieve the", "@app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def mediaLikesApi(mid): \"\"\" Retrieve list of user likes for a", "userSignatureValidator(data): \"\"\" 
Validate user signature. \"\"\" user = Users.query.filter_by(signature=data).first() if user != None:", "as err: print(\"Cannot open configuration: %s. Exiting\" % (str(err))) return config config =", "states from celery.exceptions import Ignore from flask_sqlalchemy import SQLAlchemy from sqlalchemy.dialects.mysql import MEDIUMINT", "break; if stream == None: return {\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER)", "duration = fields.Int(default=0, missing=0) mtype = fields.Str(required=True, validate=mediaTypeValidator) url = fields.Url(load_only=True) indexed =", "jsonify({\"success\": True}) else: if not request.is_json or request.get_json() == None: abort(400, \"Json data", "fingerprinter.get() return jsonify(result['data']), result['code'] if fingerprinter.failed(): return abort(500, \"Error indexing media.\") return jsonify({\"uuid\":", "media: print song['song_id'] result = { \"id\": media.id, \"offset\": song['offset_seconds'], \"duration\": media.duration, \"match_time\":", "db.session.commit() db.session.refresh(row) return {\"data\": media_schema.dump(row), \"code\": 201} @clry.task(bind=True) def recognizeMedia(self, filepath): #TODO: Use", "\"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) signature = db.Column(db.String(255), primary_key=True) def __init__(self, signature):", "expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required def mediaApi(): \"\"\" Add & retrieve", "= create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required def mediaApi(): \"\"\" Add", "return jsonify(err.messages), 400 asynctask = fingerprintMedia.delay(data) #TODO: Ensure celery always recieves task b4", "if user == None or user.id != uid: raise Exception except Exception as", "__init__(self, 
name, duration, author, mtype, sid): \"\"\" Initialize class. \"\"\" self.name = name", "\"Hello \" + get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self, media): \"\"\" Fingerprint and add a", "except Exception as e: abort(404, \"Media not found.\") likes = (str(request.url_rule).split(\"/\")[-2] == \"likes\")", "Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating: jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET',", "a db Model. \"\"\" table = db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine) __table__ = table", "found.\") likes = (str(request.url_rule).split(\"/\")[-2] == \"likes\") if likes: qresult = Likes.query.filter_by(user=uid, media=mid) else:", "400 user = Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user) token = create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id,", "\"id\": media.id, \"offset\": song['offset_seconds'], \"duration\": media.duration, \"match_time\": song['match_time'] } except Exception as e:", "= user_likes_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 if request.method == 'POST':", "abort(400, \"Json data not provided.\") json_data = request.get_json() try: data = user_schema.load(json_data) except", "the database. \"\"\" if not request.is_json or request.get_json() == None: abort(400, \"Json data", "request.get_json() try: data = media_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 asynctask", "abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def recognitionStatusApi(sid): \"\"\" Retieve the status of", "@jwt_required def mediaLikesApi(mid): \"\"\" Retrieve list of user likes for a media. 
\"\"\"", "name = table.c.song_name class Media(db.Model): \"\"\" Media model. \"\"\" id = db.Column(db.Integer, primary_key=True,", "'POST': if existingRatings: abort(409, \"User ratings exists for media.\") else: #create if likes:", "'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY'] =", "not found.\") else: #modify existingRatings.seconds = json.dumps(data[\"seconds\"]) db.session.commit() return jsonify({\"user\": uid, \"media\": mid,", "== None: abort(400, \"Json data not provided.\") json_data = request.get_json() try: data =", "os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models class Users(db.Model): \"\"\" Users model. \"\"\" id = db.Column(db.Integer,", "except ValidationError as err: return jsonify(err.messages), 400 if request.method == 'POST': if existingRatings:", "data = media_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 asynctask = fingerprintMedia.delay(data)", "fingerprinter.successful(): result = fingerprinter.get() return jsonify(result['data']), result['code'] if fingerprinter.failed(): return abort(500, \"Error indexing", "app load if (pth.isdir(TMP_UPLOAD_FOLDER) == False): print \"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy", "user to the database. \"\"\" if not request.is_json or request.get_json() == None: abort(400,", "sid = None if url != None: #fingerprint try: yt = YouTube(url) except", "yt = YouTube(url) except Exception as err: return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500} media['duration']", "signature. 
\"\"\" user = Users.query.filter_by(signature=data).first() if user != None: raise ValidationError('Please provide another", "Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating: jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>',", "stream = None for i in xrange(0, len(stream_list)): if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream", "None) sid = None if url != None: #fingerprint try: yt = YouTube(url)", "else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating: jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST',", "app = Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'],", "int(yt.length) stream_list = yt.streams.filter(only_audio=True).all() stream = None for i in xrange(0, len(stream_list)): if", "data = media_list_schema.dump(media_list) return jsonify(data), 200 elif request.method == 'POST': if not request.is_json", "jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required def mediaRecognitionApi(): \"\"\" Retrieve the resource id, name, author", "name, duration, author, mtype, sid): \"\"\" Initialize class. 
\"\"\" self.name = name self.duration", "# create upload folders on app load if (pth.isdir(TMP_UPLOAD_FOLDER) == False): print \"Creating", "user_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 user = Users(signature=data['signature']) db.session.add(user) db.session.commit()", "index media.\"}, \"code\": 500} if sid <= 0: return {\"data\":{\"msg\":\"Media already exists.\"}, \"code\":", "202 abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def recognitionStatusApi(sid): \"\"\" Retieve the status", "request.get_json() try: data = user_likes_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 if", "\"No file.\") file = request.files['file'] if file.filename == '': abort(400, \"No selected file\")", "better than filenames result = {} try: song = djv.recognize(FileRecognizer, filepath) media =", "if existingRatings: abort(409, \"User ratings exists for media.\") else: #create if likes: newRatings", "== None: return {\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER)", "200 elif request.method == 'POST': if not request.is_json or request.get_json() == None: abort(400,", "media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def mediaLikesApi(mid): \"\"\"", "werkzeug.utils import secure_filename from celery import Celery, states from celery.exceptions import Ignore from", "return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required def mediaApi(): \"\"\" Add & retrieve media.", "a JSON file \"\"\" try: with open(configpath) as f: config = json.load(f) except", "Users.query.filter_by(signature=get_jwt_identity()).first() if user == None or user.id != uid: raise Exception 
except Exception", "fingerprinter.failed(): return abort(500, \"Error indexing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET']) def", "if file and allowed_file(file.filename): filename = secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask", "sampled media. \"\"\" #TODO: Improve recognition if 'file' not in request.files: abort(400, \"No", "self.user = user self.media = media self.seconds = seconds class Dislikes(db.Model): \"\"\" Dislikes", "return data user_schema = UserSchema() media_schema = MediaSchema() media_list_schema = MediaSchema(many=True) user_likes_schema =", "Dejavu(config) jwt = JWTManager(app) clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create upload", "Exception as err: return {\"data\":{\"msg\":\"Unable to index media.\"}, \"code\": 500} if sid <=", "provided.\") json_data = request.get_json() try: data = media_schema.load(json_data) except ValidationError as err: return", "request.method == 'PUT': if not existingRatings: abort(404, \"Ratings not found.\") else: #modify existingRatings.seconds", "202 @app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid): \"\"\" Retrieve the details for the media mid.", "= db.Column(db.String(255), nullable=False) sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self, name, duration, author, mtype,", "__table__ = table id = table.c.song_id name = table.c.song_name class Media(db.Model): \"\"\" Media", "Validate media type. \"\"\" if data and data.lower() not in MEDIA_TYPES: raise ValidationError('Mtype", "result = {} try: song = djv.recognize(FileRecognizer, filepath) media = Media.query.filter_by(sid=song['song_id']).first() if media:", "not empty. 
\"\"\" if not data or len(data) == 0: raise ValidationError('Seconds cannot", "recognizer.successful(): result = recognizer.get() return jsonify(result['data']), result['code'] if recognizer.failed(): return abort(500, \"Error recognizing", "= Dejavu(config) jwt = JWTManager(app) clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create", "\"Ratings not found.\") likes = (str(request.url_rule).split(\"/\")[-1] == \"likes\") if likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all()", "import FileRecognizer from flask import Flask, request, abort, jsonify from werkzeug.utils import secure_filename", "jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif request.method == 'DELETE': if not existingRatings: abort(404, \"Ratings not", "abort, jsonify from werkzeug.utils import secure_filename from celery import Celery, states from celery.exceptions", "Add & retrieve media. \"\"\" if request.method == 'GET': media_list = Media.query.order_by(Media.name).all() data", "user = Users.query.filter_by(signature=data).first() if user != None: raise ValidationError('Please provide another signature.') def", "mid, \"seconds\": data[\"seconds\"]}), 201 elif request.method == 'PUT': if not existingRatings: abort(404, \"Ratings", "dump_only=True) name = fields.Str(required=True) author = fields.Str(required=True) duration = fields.Int(default=0, missing=0) mtype =", "activity. 
\"\"\" recognizer = recognizeMedia.AsyncResult(sid) if recognizer.ready(): if recognizer.successful(): result = recognizer.get() return", "Exception except Exception as e: abort(404, \"Media not found.\") likes = (str(request.url_rule).split(\"/\")[-2] ==", "if fingerprinter.ready(): if fingerprinter.successful(): result = fingerprinter.get() return jsonify(result['data']), result['code'] if fingerprinter.failed(): return", "using os db = SQLAlchemy(app) djv = Dejavu(config) jwt = JWTManager(app) clry =", "& dislikes serialization/deserialization schema. \"\"\" #Discard seconds out of timer window user =", "nullable=False) duration = db.Column(db.BigInteger, nullable=False) author = db.Column(db.String(255), nullable=False) mtype = db.Column(db.String(255), nullable=False)", "= seconds class Dislikes(db.Model): \"\"\" Dislikes model. \"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True)", "flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT", "already exists.\"}, \"code\": 409} row = Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit()", "for a particular media. \"\"\" try: user = Users.query.filter_by(signature=get_jwt_identity()).first() if user == None", "Convert json string to array before passing it to dump(). \"\"\" data.seconds =", "= author self.mtype = mtype self.sid = sid class Likes(db.Model): \"\"\" Likes model.", "False): print \"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models class Users(db.Model): \"\"\" Users", "@clry.task(bind=True) def fingerprintMedia(self, media): \"\"\" Fingerprint and add a given media. 
\"\"\" url", "\"\"\" self.user = user self.media = media self.seconds = seconds class Dislikes(db.Model): \"\"\"", "config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate using os db", "mtype, sid): \"\"\" Initialize class. \"\"\" self.name = name self.duration = duration self.author", "Add a user to the database. \"\"\" if not request.is_json or request.get_json() ==", "media = Media.query.get(mid) if not media: abort(404, \"Media not found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize',", "data.lower() not in MEDIA_TYPES: raise ValidationError('Mtype is invalid.') def emptyLikesValidator(data): \"\"\" Ensure likes", "from celery.exceptions import Ignore from flask_sqlalchemy import SQLAlchemy from sqlalchemy.dialects.mysql import MEDIUMINT from", "== False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath) #os.remove(filepath) # rmv", "rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating: jsonify([]) return jsonify(media_likes_schema.dump(rating))", "config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] =", "self.media = media self.seconds = seconds db.create_all() # marshmallow schemas def userSignatureValidator(data): \"\"\"", "= media self.seconds = seconds db.create_all() # marshmallow schemas def userSignatureValidator(data): \"\"\" Validate", "= (str(request.url_rule).split(\"/\")[-2] == 
\"likes\") if likes: qresult = Likes.query.filter_by(user=uid, media=mid) else: qresult =", "= init_config(\"CONFIG.json\") app = Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND']", "marshmallow schemas def userSignatureValidator(data): \"\"\" Validate user signature. \"\"\" user = Users.query.filter_by(signature=data).first() if", "JSON file \"\"\" try: with open(configpath) as f: config = json.load(f) except IOError", "signature): \"\"\" Initialize class. \"\"\" self.signature = signature class IndexedMedia(db.Model): \"\"\" Map existing", "song['offset_seconds'], \"duration\": media.duration, \"match_time\": song['match_time'] } except Exception as e: return {\"data\":{\"msg\":\"Recognition failed.\"},", "existingRatings: abort(404, \"Ratings not found.\") qresult.delete() db.session.commit() return jsonify({\"success\": True}) else: if not", "except Exception as e: abort(404, \"Ratings not found.\") likes = (str(request.url_rule).split(\"/\")[-1] == \"likes\")", "@jwt_required def fingerprintStatusApi(sid): \"\"\" Retrieve the status of a fingerprinting task. 
\"\"\" fingerprinter", "= int(yt.length) stream_list = yt.streams.filter(only_audio=True).all() stream = None for i in xrange(0, len(stream_list)):", "\"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def recognitionStatusApi(sid): \"\"\" Retieve the status of a", "found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required def mediaRecognitionApi(): \"\"\" Retrieve the resource id,", "TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS = set(['mp3', 'webm',", "except Exception as e: abort(401) try: if Media.query.get(mid) == None: raise Exception except", "return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS #TODO: Increase no. workers", "as err: return jsonify(err.messages), 400 user = Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user) token =", "seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump def process_json(self, data): \"\"\" Convert json string", "clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create upload folders on app load", "status of a recognition activity. 
\"\"\" recognizer = recognizeMedia.AsyncResult(sid) if recognizer.ready(): if recognizer.successful():", "celery always recieves task b4 returning return jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required", "qresult.delete() db.session.commit() return jsonify({\"success\": True}) else: if not request.is_json or request.get_json() == None:", "'super-secret' #TODO: Generate using os db = SQLAlchemy(app) djv = Dejavu(config) jwt =", "'.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS = set(['mp3', 'webm', '3gp', 'ogg']) MEDIA_TYPES = ['television',", "Exception as e: return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500} if not song: return {\"data\":{\"msg\":\"Media", "1)[1].lower() in ALLOWED_EXTENSIONS #TODO: Increase no. workers @clry.task def testInstall(): \"\"\" Test installation.", "IndexedMedia(db.Model): \"\"\" Map existing songs table to a db Model. \"\"\" table =", "Dislikes(db.Model): \"\"\" Dislikes model. \"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary", "fingerprinting task. \"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that don't exist if", "media_likes_schema = LikesDislikesSchema(many=True) def allowed_file(filename): return '.' in filename and filename.rsplit('.', 1)[1].lower() in", "validate=emptyLikesValidator) @pre_dump def process_json(self, data): \"\"\" Convert json string to array before passing", "= table id = table.c.song_id name = table.c.song_name class Media(db.Model): \"\"\" Media model.", "allowed_file(filename): return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS #TODO: Increase no.", "Retrieve the status of a fingerprinting task. 
\"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle", "= Media.query.get(mid) if not media: abort(404, \"Media not found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST'])", "stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath) #os.remove(filepath) # rmv file after use except Exception as", "file \"\"\" try: with open(configpath) as f: config = json.load(f) except IOError as", "return jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif request.method == 'DELETE': if not existingRatings: abort(404, \"Ratings", "raise ValidationError('Mtype is invalid.') def emptyLikesValidator(data): \"\"\" Ensure likes is not empty. \"\"\"", "self.signature = signature class IndexedMedia(db.Model): \"\"\" Map existing songs table to a db", "None: raise ValidationError('Please provide another signature.') def mediaTypeValidator(data): \"\"\" Validate media type. \"\"\"", "id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(255), nullable=False) duration = db.Column(db.BigInteger, nullable=False)", "return jsonify(data), 200 elif request.method == 'POST': if not request.is_json or request.get_json() ==", "json_data = request.get_json() try: data = user_likes_schema.load(json_data) except ValidationError as err: return jsonify(err.messages),", "media. 
\"\"\" try: user = Users.query.filter_by(signature=get_jwt_identity()).first() if user == None or user.id !=", "== 'DELETE': if not existingRatings: abort(404, \"Ratings not found.\") qresult.delete() db.session.commit() return jsonify({\"success\":", "= TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] =", "init_config(configpath): \"\"\" Load config from a JSON file \"\"\" try: with open(configpath) as", "existingRatings: return jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif request.method == 'DELETE': if not existingRatings: abort(404,", "SQLAlchemy from sqlalchemy.dialects.mysql import MEDIUMINT from marshmallow import Schema, fields, ValidationError, pre_dump from", "Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return jsonify({\"user\":", "(pth.isdir(TMP_UPLOAD_FOLDER) == False): print \"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models class Users(db.Model):", "clry.conf.update(app.config) # create upload folders on app load if (pth.isdir(TMP_UPLOAD_FOLDER) == False): print", "provided.\") json_data = request.get_json() try: data = user_schema.load(json_data) except ValidationError as err: return", "if recognizer.failed(): return abort(500, \"Error recognizing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET'])", "abort(404, \"Ratings not found.\") likes = (str(request.url_rule).split(\"/\")[-1] == \"likes\") if likes: rating =", "and allowed_file(file.filename): filename = 
secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask = recognizeMedia.delay(filepath)", "file.\") file = request.files['file'] if file.filename == '': abort(400, \"No selected file\") if", "token = create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required def mediaApi(): \"\"\"", "IOError as err: print(\"Cannot open configuration: %s. Exiting\" % (str(err))) return config config", "jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid): \"\"\" Retrieve the details for the", "\"\"\" Map existing songs table to a db Model. \"\"\" table = db.Table(\"songs\",", "create_access_token, get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm'", "err: return jsonify(err.messages), 400 user = Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user) token = create_access_token(identity=data['signature'],", "= Users.query.filter_by(signature=get_jwt_identity()).first() if user == None or user.id != uid: raise Exception except", "self.mtype = mtype self.sid = sid class Likes(db.Model): \"\"\" Likes model. \"\"\" media", "from pytube import YouTube from pydub import AudioSegment from dejavu import Dejavu from", "def check_indexed(self, media): \"\"\" Return Boolean indicator if media is indexed. \"\"\" return", "db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self, name, duration, author, mtype, sid): \"\"\" Initialize class. \"\"\"", "return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required def mediaRecognitionApi(): \"\"\" Retrieve the resource id, name,", "\"\"\" User serialization/deserialization schema. 
\"\"\" signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator) class MediaSchema(Schema): \"\"\"", "#Discard seconds out of timer window user = fields.Int(required=True, dump_only=True) media = fields.Int(required=True,", "methods=['GET']) @jwt_required def mediaLikesApi(mid): \"\"\" Retrieve list of user likes for a media.", "\"code\": 500} if not song: return {\"data\":{\"msg\":\"Media not found.\"}, \"code\": 404} return {\"data\":result,", "user == None or user.id != uid: raise Exception except Exception as e:", "userLikesApi(mid, uid): \"\"\" Retrieve, add & modify the user likes for a particular", "media): \"\"\" Fingerprint and add a given media. \"\"\" url = media.get(\"url\", None)", "sid): \"\"\" Initialize class. \"\"\" self.name = name self.duration = duration self.author =", "methods=['GET']) @jwt_required def recognitionStatusApi(sid): \"\"\" Retieve the status of a recognition activity. \"\"\"", "asynctask.ready() and asynctask.successful(): return jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\", 500) @app.route('/register', methods=['POST']) def registerApi():", "str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid): \"\"\" Retrieve the details for the media", "abort(500, \"Error indexing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid): \"\"\"", "open configuration: %s. 
Exiting\" % (str(err))) return config config = init_config(\"CONFIG.json\") app =", "flask import Flask, request, abort, jsonify from werkzeug.utils import secure_filename from celery import", "\"\"\" self.user = user self.media = media self.seconds = seconds db.create_all() # marshmallow", "existingRatings: abort(404, \"Ratings not found.\") else: #modify existingRatings.seconds = json.dumps(data[\"seconds\"]) db.session.commit() return jsonify({\"user\":", "def mediaRecognitionApi(): \"\"\" Retrieve the resource id, name, author and time index of", "path as pth from pytube import YouTube from pydub import AudioSegment from dejavu", "jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE'])", "== \"likes\") if likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not", "else: #create if likes: newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings = Dislikes(user=uid,", "type. \"\"\" if data and data.lower() not in MEDIA_TYPES: raise ValidationError('Mtype is invalid.')", "except IOError as err: print(\"Cannot open configuration: %s. 
Exiting\" % (str(err))) return config", "app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate using os db = SQLAlchemy(app)", "found.\") likes = (str(request.url_rule).split(\"/\")[-1] == \"likes\") if likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating", "jsonify(err.messages), 400 if request.method == 'POST': if existingRatings: abort(409, \"User ratings exists for", "process_json(self, data): \"\"\" Convert json string to array before passing it to dump().", "if not song: return {\"data\":{\"msg\":\"Media not found.\"}, \"code\": 404} return {\"data\":result, \"code\": 200}", "self.duration = duration self.author = author self.mtype = mtype self.sid = sid class", "task b4 returning return jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def fingerprintStatusApi(sid): \"\"\"", "return {\"data\":{\"msg\":\"Media not found.\"}, \"code\": 404} return {\"data\":result, \"code\": 200} @app.route('/hello', methods=['GET']) @jwt_required", "= Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user) token = create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media',", "get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self, media): \"\"\" Fingerprint and add a given media. \"\"\"", "sid <= 0: return {\"data\":{\"msg\":\"Media already exists.\"}, \"code\": 409} row = Media(name=media['name'], duration=media['duration'],", "'movie', 'music'] def init_config(configpath): \"\"\" Load config from a JSON file \"\"\" try:", "Ensure likes is not empty. 
\"\"\" if not data or len(data) == 0:", "recognizing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def mediaLikesApi(mid):", "= db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self, name, duration, author, mtype, sid): \"\"\" Initialize class.", "file = request.files['file'] if file.filename == '': abort(400, \"No selected file\") if file", "= Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list) return jsonify(data), 200 elif request.method == 'POST': if", "json string to array before passing it to dump(). \"\"\" data.seconds = json.loads(data.seconds)", "import Celery, states from celery.exceptions import Ignore from flask_sqlalchemy import SQLAlchemy from sqlalchemy.dialects.mysql", "@app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def mediaLikesApi(mid): \"\"\" Retrieve list of user likes", "def userSignatureValidator(data): \"\"\" Validate user signature. 
\"\"\" user = Users.query.filter_by(signature=data).first() if user !=", "not existingRatings: return jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif request.method == 'DELETE': if not existingRatings:", "!= uid: raise Exception except Exception as e: abort(401) try: if Media.query.get(mid) ==", "= JWTManager(app) clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create upload folders on", "recognizer.get() return jsonify(result['data']), result['code'] if recognizer.failed(): return abort(500, \"Error recognizing media.\") return jsonify({\"uuid\":", "result['code'] if recognizer.failed(): return abort(500, \"Error recognizing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes',", "jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def mediaLikesApi(mid): \"\"\" Retrieve list", "try: data = media_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 asynctask =", "err: return {\"data\":{\"msg\":\"Unable to index media.\"}, \"code\": 500} if sid <= 0: return", "fields.Int(default=0, missing=0) mtype = fields.Str(required=True, validate=mediaTypeValidator) url = fields.Url(load_only=True) indexed = fields.Method('check_indexed', dump_only=True)", "def testInstall(): \"\"\" Test installation. 
\"\"\" return \"Hello \" + get_jwt_identity() @clry.task(bind=True) def", "= fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump def process_json(self, data): \"\"\" Convert json string to", "filenames result = {} try: song = djv.recognize(FileRecognizer, filepath) media = Media.query.filter_by(sid=song['song_id']).first() if", "db.Column(db.String(255), nullable=False) duration = db.Column(db.BigInteger, nullable=False) author = db.Column(db.String(255), nullable=False) mtype = db.Column(db.String(255),", "return jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\", 500) @app.route('/register', methods=['POST']) def registerApi(): \"\"\" Add a", "found.\") else: #modify existingRatings.seconds = json.dumps(data[\"seconds\"]) db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\":", "Map existing songs table to a db Model. \"\"\" table = db.Table(\"songs\", db.metadata,", "indexing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid): \"\"\" Retrieve the", "load_only=True, validate=userSignatureValidator) class MediaSchema(Schema): \"\"\" Media serialization/deserialization schema. \"\"\" id = fields.Int(required=True, dump_only=True)", "pytube import YouTube from pydub import AudioSegment from dejavu import Dejavu from dejavu.recognize", "media = fields.Int(required=True, dump_only=True) seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump def process_json(self, data):", "methods=['GET']) @jwt_required def helloApi(): \"\"\" Installation test. 
\"\"\" asynctask = testInstall.apply() if asynctask.ready()", "abort(404, \"Media not found.\") likes = (str(request.url_rule).split(\"/\")[-2] == \"likes\") if likes: qresult =", "seconds=json.dumps(data[\"seconds\"])) else: newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return jsonify({\"user\": uid, \"media\":", "YouTube from pydub import AudioSegment from dejavu import Dejavu from dejavu.recognize import FileRecognizer", "\"\"\" signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator) class MediaSchema(Schema): \"\"\" Media serialization/deserialization schema. \"\"\"", "= recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}), 202 abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def", "'.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS = set(['mp3', 'webm', '3gp', 'ogg'])", "= True app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate using os db = SQLAlchemy(app) djv", "the status of a recognition activity. \"\"\" recognizer = recognizeMedia.AsyncResult(sid) if recognizer.ready(): if", "raise ValidationError('Please provide another signature.') def mediaTypeValidator(data): \"\"\" Validate media type. 
\"\"\" if", "media=mid) existingRatings = qresult.first() if request.method == 'GET': if not existingRatings: return jsonify({})", "'': abort(400, \"No selected file\") if file and allowed_file(file.filename): filename = secure_filename(file.filename) filepath", "request.method == 'GET': media_list = Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list) return jsonify(data), 200 elif", "= YouTube(url) except Exception as err: return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500} media['duration'] =", "not found.\") qresult.delete() db.session.commit() return jsonify({\"success\": True}) else: if not request.is_json or request.get_json()", "f: config = json.load(f) except IOError as err: print(\"Cannot open configuration: %s. Exiting\"", "xrange(0, len(stream_list)): if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream = stream_list[i] break; if stream ==", "None: return {\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER) try:", "methods=['GET']) def mediaItemApi(mid): \"\"\" Retrieve the details for the media mid. \"\"\" media", "Increase no. workers @clry.task def testInstall(): \"\"\" Test installation. \"\"\" return \"Hello \"", "celery.exceptions import Ignore from flask_sqlalchemy import SQLAlchemy from sqlalchemy.dialects.mysql import MEDIUMINT from marshmallow", "uid): \"\"\" Retrieve, add & modify the user likes for a particular media.", "schema. 
\"\"\" id = fields.Int(required=True, dump_only=True) name = fields.Str(required=True) author = fields.Str(required=True) duration", "\"\"\" recognizer = recognizeMedia.AsyncResult(sid) if recognizer.ready(): if recognizer.successful(): result = recognizer.get() return jsonify(result['data']),", "db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine) __table__ = table id = table.c.song_id name = table.c.song_name", "media_schema = MediaSchema() media_list_schema = MediaSchema(many=True) user_likes_schema = LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True) def", "\"Media not found.\") likes = (str(request.url_rule).split(\"/\")[-2] == \"likes\") if likes: qresult = Likes.query.filter_by(user=uid,", "@jwt_required def recognitionStatusApi(sid): \"\"\" Retieve the status of a recognition activity. \"\"\" recognizer", "\"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models class Users(db.Model): \"\"\" Users model. \"\"\"", "class. 
\"\"\" self.user = user self.media = media self.seconds = seconds db.create_all() #", "= { \"id\": media.id, \"offset\": song['offset_seconds'], \"duration\": media.duration, \"match_time\": song['match_time'] } except Exception", "return jsonify(result['data']), result['code'] if fingerprinter.failed(): return abort(500, \"Error indexing media.\") return jsonify({\"uuid\": str(sid)}),", "in MEDIA_TYPES: raise ValidationError('Mtype is invalid.') def emptyLikesValidator(data): \"\"\" Ensure likes is not", "filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath) #os.remove(filepath) # rmv file after use except", "testInstall.apply() if asynctask.ready() and asynctask.successful(): return jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\", 500) @app.route('/register', methods=['POST'])", "not rating: jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST',", "= (str(request.url_rule).split(\"/\")[-1] == \"likes\") if likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all()", "\"\"\" Load config from a JSON file \"\"\" try: with open(configpath) as f:", "Load config from a JSON file \"\"\" try: with open(configpath) as f: config", "fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump def process_json(self, data): \"\"\" Convert json string to array", "flask_sqlalchemy import SQLAlchemy from sqlalchemy.dialects.mysql import MEDIUMINT from marshmallow import Schema, fields, ValidationError,", "\"\"\" data.seconds = json.loads(data.seconds) return data user_schema = UserSchema() media_schema = MediaSchema() media_list_schema", "from a JSON file \"\"\" try: with open(configpath) as f: config = json.load(f)", "= 'audio/webm' ALLOWED_EXTENSIONS = 
set(['mp3', 'webm', '3gp', 'ogg']) MEDIA_TYPES = ['television', 'movie', 'music']", "mediaRecognitionApi(): \"\"\" Retrieve the resource id, name, author and time index of a", "validate=mediaTypeValidator) url = fields.Url(load_only=True) indexed = fields.Method('check_indexed', dump_only=True) def check_indexed(self, media): \"\"\" Return", "== False): print \"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models class Users(db.Model): \"\"\"", "\"\"\" Initialize class. \"\"\" self.signature = signature class IndexedMedia(db.Model): \"\"\" Map existing songs", "db.Column(db.Integer, primary_key=True, autoincrement=True) signature = db.Column(db.String(255), primary_key=True) def __init__(self, signature): \"\"\" Initialize class.", "Retrieve the details for the media mid. \"\"\" media = Media.query.get(mid) if not", "\"offset\": song['offset_seconds'], \"duration\": media.duration, \"match_time\": song['match_time'] } except Exception as e: return {\"data\":{\"msg\":\"Recognition", "= fields.Int(default=0, missing=0) mtype = fields.Str(required=True, validate=mediaTypeValidator) url = fields.Url(load_only=True) indexed = fields.Method('check_indexed',", "404} return {\"data\":result, \"code\": 200} @app.route('/hello', methods=['GET']) @jwt_required def helloApi(): \"\"\" Installation test.", "\"\"\" Return Boolean indicator if media is indexed. \"\"\" return not media.sid ==", "= Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row) return {\"data\": media_schema.dump(row), \"code\":", "\"Media not found.\") except Exception as e: abort(404, \"Ratings not found.\") likes =", "user self.media = media self.seconds = seconds class Dislikes(db.Model): \"\"\" Dislikes model. 
\"\"\"", "= testInstall.apply() if asynctask.ready() and asynctask.successful(): return jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\", 500) @app.route('/register',", "media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req user = db.Column(db.Integer,", "500) @app.route('/register', methods=['POST']) def registerApi(): \"\"\" Add a user to the database. \"\"\"", "duration self.author = author self.mtype = mtype self.sid = sid class Likes(db.Model): \"\"\"", "and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS #TODO: Increase no. workers @clry.task def testInstall(): \"\"\"", "Exception except Exception as e: abort(401) try: if Media.query.get(mid) == None: raise Exception", "abort(404, \"Media not found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required def mediaRecognitionApi(): \"\"\" Retrieve", "jsonify(result['data']), result['code'] if recognizer.failed(): return abort(500, \"Error recognizing media.\") return jsonify({\"uuid\": str(sid)}), 202", "if fingerprinter.successful(): result = fingerprinter.get() return jsonify(result['data']), result['code'] if fingerprinter.failed(): return abort(500, \"Error", "create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required def mediaApi(): \"\"\" Add &", "not media: abort(404, \"Media not found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required def mediaRecognitionApi():", "MediaSchema(many=True) user_likes_schema = LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True) def allowed_file(filename): return '.' 
in filename", "media_schema.dump(row), \"code\": 201} @clry.task(bind=True) def recognizeMedia(self, filepath): #TODO: Use sth better than filenames", "= SQLAlchemy(app) djv = Dejavu(config) jwt = JWTManager(app) clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL'])", "not provided.\") json_data = request.get_json() try: data = user_schema.load(json_data) except ValidationError as err:", "don't exist if fingerprinter.ready(): if fingerprinter.successful(): result = fingerprinter.get() return jsonify(result['data']), result['code'] if", "jsonify from werkzeug.utils import secure_filename from celery import Celery, states from celery.exceptions import", "\"\"\" Retieve the status of a recognition activity. \"\"\" recognizer = recognizeMedia.AsyncResult(sid) if", "stream = stream_list[i] break; if stream == None: return {\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\":", "request.method == 'POST': if not request.is_json or request.get_json() == None: abort(400, \"Json data", "user = fields.Int(required=True, dump_only=True) media = fields.Int(required=True, dump_only=True) seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator)", "'DELETE': if not existingRatings: abort(404, \"Ratings not found.\") qresult.delete() db.session.commit() return jsonify({\"success\": True})", "MediaSchema() media_list_schema = MediaSchema(many=True) user_likes_schema = LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True) def allowed_file(filename): return", "seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}), 201 elif request.method", "#TODO: Increase no. workers @clry.task def testInstall(): \"\"\" Test installation. \"\"\" return \"Hello", "the status of a fingerprinting task. 
\"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle sids", "UserSchema() media_schema = MediaSchema() media_list_schema = MediaSchema(many=True) user_likes_schema = LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True)", "'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @jwt_required def userLikesApi(mid, uid): \"\"\" Retrieve,", "'audio/webm' ALLOWED_EXTENSIONS = set(['mp3', 'webm', '3gp', 'ogg']) MEDIA_TYPES = ['television', 'movie', 'music'] def", "# marshmallow schemas def userSignatureValidator(data): \"\"\" Validate user signature. \"\"\" user = Users.query.filter_by(signature=data).first()", "serialization/deserialization schema. \"\"\" #Discard seconds out of timer window user = fields.Int(required=True, dump_only=True)", "\"Json data not provided.\") json_data = request.get_json() try: data = media_schema.load(json_data) except ValidationError", "be empty.') class UserSchema(Schema): \"\"\" User serialization/deserialization schema. \"\"\" signature = fields.Str(required=True, load_only=True,", "author self.mtype = mtype self.sid = sid class Likes(db.Model): \"\"\" Likes model. \"\"\"", "def fingerprintMedia(self, media): \"\"\" Fingerprint and add a given media. 
\"\"\" url =", "= db.Column(db.String(255), nullable=False) duration = db.Column(db.BigInteger, nullable=False) author = db.Column(db.String(255), nullable=False) mtype =", "song['song_id'] result = { \"id\": media.id, \"offset\": song['offset_seconds'], \"duration\": media.duration, \"match_time\": song['match_time'] }", "return jsonify({\"success\": True}) else: if not request.is_json or request.get_json() == None: abort(400, \"Json", "elif request.method == 'DELETE': if not existingRatings: abort(404, \"Ratings not found.\") qresult.delete() db.session.commit()", "fields.Str(required=True, validate=mediaTypeValidator) url = fields.Url(load_only=True) indexed = fields.Method('check_indexed', dump_only=True) def check_indexed(self, media): \"\"\"", "except Exception as err: return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500} media['duration'] = int(yt.length) stream_list", "Test installation. \"\"\" return \"Hello \" + get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self, media): \"\"\"", "recognizer = recognizeMedia.AsyncResult(sid) if recognizer.ready(): if recognizer.successful(): result = recognizer.get() return jsonify(result['data']), result['code']", "mtype = fields.Str(required=True, validate=mediaTypeValidator) url = fields.Url(load_only=True) indexed = fields.Method('check_indexed', dump_only=True) def check_indexed(self,", "mediaApi(): \"\"\" Add & retrieve media. \"\"\" if request.method == 'GET': media_list =", "media self.seconds = seconds class Dislikes(db.Model): \"\"\" Dislikes model. \"\"\" media = db.Column(db.Integer,", "\"\"\" user = Users.query.filter_by(signature=data).first() if user != None: raise ValidationError('Please provide another signature.')", "\"\"\" Retrieve, add & modify the user likes for a particular media. 
\"\"\"", "stream_list = yt.streams.filter(only_audio=True).all() stream = None for i in xrange(0, len(stream_list)): if stream_list[i].mime_type", "seconds = db.Column(db.JSON, nullable=False) def __init__(self, user, media, seconds): \"\"\" Initialize class. \"\"\"", "= 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate using", "= request.files['file'] if file.filename == '': abort(400, \"No selected file\") if file and", "media. \"\"\" #TODO: Improve recognition if 'file' not in request.files: abort(400, \"No file.\")", "exists.\"}, \"code\": 409} row = Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row)", "serialization/deserialization schema. \"\"\" signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator) class MediaSchema(Schema): \"\"\" Media serialization/deserialization", "try: if Media.query.get(mid) == None: abort(404, \"Media not found.\") except Exception as e:", "return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @jwt_required", "!= None: #fingerprint try: yt = YouTube(url) except Exception as err: return {\"data\":{\"msg\":\"Media", "except Exception as err: return {\"data\":{\"msg\":\"Unable to index media.\"}, \"code\": 500} if sid", "TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS = set(['mp3', 'webm', '3gp', 'ogg']) MEDIA_TYPES", "methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def mediaLikesApi(mid): \"\"\" Retrieve list of user likes for", "\"\"\" Validate 
user signature. \"\"\" user = Users.query.filter_by(signature=data).first() if user != None: raise", "False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath) #os.remove(filepath) # rmv file", "= request.get_json() try: data = user_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400", "def __init__(self, user, media, seconds): \"\"\" Initialize class. \"\"\" self.user = user self.media", "not existingRatings: abort(404, \"Ratings not found.\") qresult.delete() db.session.commit() return jsonify({\"success\": True}) else: if", "if data and data.lower() not in MEDIA_TYPES: raise ValidationError('Mtype is invalid.') def emptyLikesValidator(data):", "config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate using os db =", "if not existingRatings: abort(404, \"Ratings not found.\") else: #modify existingRatings.seconds = json.dumps(data[\"seconds\"]) db.session.commit()", "\"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required def mediaApi(): \"\"\" Add & retrieve media. \"\"\" if", "json.load(f) except IOError as err: print(\"Cannot open configuration: %s. Exiting\" % (str(err))) return", "methods=['POST']) def registerApi(): \"\"\" Add a user to the database. \"\"\" if not", "= set(['mp3', 'webm', '3gp', 'ogg']) MEDIA_TYPES = ['television', 'movie', 'music'] def init_config(configpath): \"\"\"", "is invalid.') def emptyLikesValidator(data): \"\"\" Ensure likes is not empty. 
\"\"\" if not", "= fingerprinter.get() return jsonify(result['data']), result['code'] if fingerprinter.failed(): return abort(500, \"Error indexing media.\") return", "json_data = request.get_json() try: data = user_schema.load(json_data) except ValidationError as err: return jsonify(err.messages),", "if fingerprinter.failed(): return abort(500, \"Error indexing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET'])", "config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY'] = 'super-secret'", "jwt_required, JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS =", "request.get_json() try: data = user_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 user", "== None class LikesDislikesSchema(Schema): \"\"\" Likes & dislikes serialization/deserialization schema. 
\"\"\" #Discard seconds", "as err: return {\"data\":{\"msg\":\"Unable to index media.\"}, \"code\": 500} if sid <= 0:", "= db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req user = db.Column(db.Integer, db.ForeignKey('users.id'),", "def recognizeMedia(self, filepath): #TODO: Use sth better than filenames result = {} try:", "== 'GET': if not existingRatings: return jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif request.method == 'DELETE':", "= user self.media = media self.seconds = seconds class Dislikes(db.Model): \"\"\" Dislikes model.", "exists for media.\") else: #create if likes: newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else:", "jsonify(err.messages), 400 asynctask = fingerprintMedia.delay(data) #TODO: Ensure celery always recieves task b4 returning", "db.ForeignKey('users.id'), primary_key=True) seconds = db.Column(db.JSON, nullable=False) def __init__(self, user, media, seconds): \"\"\" Initialize", "if user != None: raise ValidationError('Please provide another signature.') def mediaTypeValidator(data): \"\"\" Validate", "app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'],", "mediaTypeValidator(data): \"\"\" Validate media type. \"\"\" if data and data.lower() not in MEDIA_TYPES:", "from dejavu.recognize import FileRecognizer from flask import Flask, request, abort, jsonify from werkzeug.utils", "author = fields.Str(required=True) duration = fields.Int(default=0, missing=0) mtype = fields.Str(required=True, validate=mediaTypeValidator) url =", "retrieve media. 
\"\"\" if request.method == 'GET': media_list = Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list)", "None: raise Exception except Exception as e: abort(404, \"Media not found.\") likes =", "emptyLikesValidator(data): \"\"\" Ensure likes is not empty. \"\"\" if not data or len(data)", "\"\"\" Media model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(255), nullable=False)", "__init__(self, signature): \"\"\" Initialize class. \"\"\" self.signature = signature class IndexedMedia(db.Model): \"\"\" Map", "to array before passing it to dump(). \"\"\" data.seconds = json.loads(data.seconds) return data", "\"duration\": media.duration, \"match_time\": song['match_time'] } except Exception as e: return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\":", "= secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask = recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}),", "uid, \"media\": mid, \"seconds\": data[\"seconds\"]}), 201 elif request.method == 'PUT': if not existingRatings:", "\"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that don't exist if fingerprinter.ready(): if", "try: data = user_likes_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 if request.method", "return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def mediaLikesApi(mid): \"\"\" Retrieve", "% (str(err))) return config config = init_config(\"CONFIG.json\") app = Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER", "celery import Celery, states from celery.exceptions import Ignore from flask_sqlalchemy import SQLAlchemy from", "dislikes serialization/deserialization schema. 
\"\"\" #Discard seconds out of timer window user = fields.Int(required=True,", "@app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def recognitionStatusApi(sid): \"\"\" Retieve the status of a recognition activity.", "asynctask.successful(): return jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\", 500) @app.route('/register', methods=['POST']) def registerApi(): \"\"\" Add", "user likes for a particular media. \"\"\" try: user = Users.query.filter_by(signature=get_jwt_identity()).first() if user", "data.seconds = json.loads(data.seconds) return data user_schema = UserSchema() media_schema = MediaSchema() media_list_schema =", "= Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'],", "== 'POST': if not request.is_json or request.get_json() == None: abort(400, \"Json data not", "\"Ratings not found.\") else: #modify existingRatings.seconds = json.dumps(data[\"seconds\"]) db.session.commit() return jsonify({\"user\": uid, \"media\":", "Handle sids that don't exist if fingerprinter.ready(): if fingerprinter.successful(): result = fingerprinter.get() return", "primary_key=True) seconds = db.Column(db.JSON, nullable=False) def __init__(self, user, media, seconds): \"\"\" Initialize class.", "= ['television', 'movie', 'music'] def init_config(configpath): \"\"\" Load config from a JSON file", "jsonify(result['data']), result['code'] if fingerprinter.failed(): return abort(500, \"Error indexing media.\") return jsonify({\"uuid\": str(sid)}), 202", "Improve recognition if 'file' not in request.files: abort(400, \"No file.\") file = request.files['file']", "i in xrange(0, len(stream_list)): if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream = 
stream_list[i] break; if", "@app.route('/media', methods=['GET','POST']) @jwt_required def mediaApi(): \"\"\" Add & retrieve media. \"\"\" if request.method", "= Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating: jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE'])", "as e: abort(404, \"Ratings not found.\") likes = (str(request.url_rule).split(\"/\")[-1] == \"likes\") if likes:", "\"\"\" Add & retrieve media. \"\"\" if request.method == 'GET': media_list = Media.query.order_by(Media.name).all()", "import create_access_token, get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT =", "if not data or len(data) == 0: raise ValidationError('Seconds cannot be empty.') class", "%s. Exiting\" % (str(err))) return config config = init_config(\"CONFIG.json\") app = Flask(__name__) app.config['UPLOAD_FOLDER']", "os db = SQLAlchemy(app) djv = Dejavu(config) jwt = JWTManager(app) clry = Celery(app.name,", "a media. \"\"\" try: if Media.query.get(mid) == None: abort(404, \"Media not found.\") except", "request.get_json() == None: abort(400, \"Json data not provided.\") json_data = request.get_json() try: data", "return jsonify(err.messages), 400 user = Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user) token = create_access_token(identity=data['signature'], expires_delta=False)", "Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list) return jsonify(data), 200 elif request.method == 'POST': if not", "{\"data\":result, \"code\": 200} @app.route('/hello', methods=['GET']) @jwt_required def helloApi(): \"\"\" Installation test. 
\"\"\" asynctask", "'music'] def init_config(configpath): \"\"\" Load config from a JSON file \"\"\" try: with", "file.filename == '': abort(400, \"No selected file\") if file and allowed_file(file.filename): filename =", "sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self, name, duration, author, mtype, sid): \"\"\" Initialize", "201} @clry.task(bind=True) def recognizeMedia(self, filepath): #TODO: Use sth better than filenames result =", "not existingRatings: abort(404, \"Ratings not found.\") else: #modify existingRatings.seconds = json.dumps(data[\"seconds\"]) db.session.commit() return", "202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes', methods=['GET']) @jwt_required def mediaLikesApi(mid): \"\"\" Retrieve list of user", "\"code\": 200} @app.route('/hello', methods=['GET']) @jwt_required def helloApi(): \"\"\" Installation test. \"\"\" asynctask =", "fingerprinter.ready(): if fingerprinter.successful(): result = fingerprinter.get() return jsonify(result['data']), result['code'] if fingerprinter.failed(): return abort(500,", "details for the media mid. \"\"\" media = Media.query.get(mid) if not media: abort(404,", "= 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host'])", "{\"data\": media_schema.dump(row), \"code\": 201} @clry.task(bind=True) def recognizeMedia(self, filepath): #TODO: Use sth better than", "existing songs table to a db Model. 
\"\"\" table = db.Table(\"songs\", db.metadata, autoload=True,", "window user = fields.Int(required=True, dump_only=True) media = fields.Int(required=True, dump_only=True) seconds = fields.List(fields.Int(), required=True,", "} except Exception as e: return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500} if not song:", "not in request.files: abort(400, \"No file.\") file = request.files['file'] if file.filename == '':", "= recognizeMedia.AsyncResult(sid) if recognizer.ready(): if recognizer.successful(): result = recognizer.get() return jsonify(result['data']), result['code'] if", "uid: raise Exception except Exception as e: abort(401) try: if Media.query.get(mid) == None:", "\"\"\" Media serialization/deserialization schema. \"\"\" id = fields.Int(required=True, dump_only=True) name = fields.Str(required=True) author", "ratings exists for media.\") else: #create if likes: newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"]))", "not found.\") likes = (str(request.url_rule).split(\"/\")[-2] == \"likes\") if likes: qresult = Likes.query.filter_by(user=uid, media=mid)", "request.is_json or request.get_json() == None: abort(400, \"Json data not provided.\") json_data = request.get_json()", "if media is indexed. \"\"\" return not media.sid == None class LikesDislikesSchema(Schema): \"\"\"", "user_schema = UserSchema() media_schema = MediaSchema() media_list_schema = MediaSchema(many=True) user_likes_schema = LikesDislikesSchema() media_likes_schema", "= Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}),", "def init_config(configpath): \"\"\" Load config from a JSON file \"\"\" try: with open(configpath)", "model. 
\"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req user", "qresult = Likes.query.filter_by(user=uid, media=mid) else: qresult = Dislikes.query.filter_by(user=uid, media=mid) existingRatings = qresult.first() if", "recognizeMedia(self, filepath): #TODO: Use sth better than filenames result = {} try: song", "table.c.song_name class Media(db.Model): \"\"\" Media model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) name", "return {\"data\":result, \"code\": 200} @app.route('/hello', methods=['GET']) @jwt_required def helloApi(): \"\"\" Installation test. \"\"\"", "result = fingerprinter.get() return jsonify(result['data']), result['code'] if fingerprinter.failed(): return abort(500, \"Error indexing media.\")", "\"code\": 500} if sid <= 0: return {\"data\":{\"msg\":\"Media already exists.\"}, \"code\": 409} row", "db = SQLAlchemy(app) djv = Dejavu(config) jwt = JWTManager(app) clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'],", "\"code\": 500} media['duration'] = int(yt.length) stream_list = yt.streams.filter(only_audio=True).all() stream = None for i", "primary_key=True) def __init__(self, signature): \"\"\" Initialize class. \"\"\" self.signature = signature class IndexedMedia(db.Model):", "id = fields.Int(required=True, dump_only=True) name = fields.Str(required=True) author = fields.Str(required=True) duration = fields.Int(default=0,", "media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return jsonify({\"user\": uid,", "\"\"\" Dislikes model. 
\"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key", "if sid <= 0: return {\"data\":{\"msg\":\"Media already exists.\"}, \"code\": 409} row = Media(name=media['name'],", "'GET': media_list = Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list) return jsonify(data), 200 elif request.method ==", "\"\"\" Ensure likes is not empty. \"\"\" if not data or len(data) ==", "dump(). \"\"\" data.seconds = json.loads(data.seconds) return data user_schema = UserSchema() media_schema = MediaSchema()", "print(\"Cannot open configuration: %s. Exiting\" % (str(err))) return config config = init_config(\"CONFIG.json\") app", "get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS", "nullable=False) mtype = db.Column(db.String(255), nullable=False) sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self, name, duration,", "mtype self.sid = sid class Likes(db.Model): \"\"\" Likes model. \"\"\" media = db.Column(db.Integer,", "url = fields.Url(load_only=True) indexed = fields.Method('check_indexed', dump_only=True) def check_indexed(self, media): \"\"\" Return Boolean", "mid. \"\"\" media = Media.query.get(mid) if not media: abort(404, \"Media not found.\") return", "filepath = pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask = recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}), 202 abort(400,", "ALLOWED_EXTENSIONS #TODO: Increase no. workers @clry.task def testInstall(): \"\"\" Test installation. 
\"\"\" return", "\"Error indexing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid): \"\"\" Retrieve", "len(data) == 0: raise ValidationError('Seconds cannot be empty.') class UserSchema(Schema): \"\"\" User serialization/deserialization", "a recognition activity. \"\"\" recognizer = recognizeMedia.AsyncResult(sid) if recognizer.ready(): if recognizer.successful(): result =", "abort(400, \"No selected file\") if file and allowed_file(file.filename): filename = secure_filename(file.filename) filepath =", "nullable=False) def __init__(self, user, media, seconds): \"\"\" Initialize class. \"\"\" self.user = user", "data not provided.\") json_data = request.get_json() try: data = user_likes_schema.load(json_data) except ValidationError as", "Media.query.get(mid) if not media: abort(404, \"Media not found.\") return jsonify(media_schema.dump(media)) @app.route('/media/recognize', methods=['POST']) @jwt_required", "= db.Column(db.Integer, primary_key=True, autoincrement=True) signature = db.Column(db.String(255), primary_key=True) def __init__(self, signature): \"\"\" Initialize", "try: data = user_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 user =", "elif request.method == 'PUT': if not existingRatings: abort(404, \"Ratings not found.\") else: #modify", "jsonify(data), 200 elif request.method == 'POST': if not request.is_json or request.get_json() == None:", "before passing it to dump(). \"\"\" data.seconds = json.loads(data.seconds) return data user_schema =", "jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required def mediaApi(): \"\"\" Add & retrieve media. 
\"\"\"", "= db.Column(db.String(255), nullable=False) mtype = db.Column(db.String(255), nullable=False) sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self,", "abort(400, \"Json data not provided.\") json_data = request.get_json() try: data = user_likes_schema.load(json_data) except", "name, author and time index of a sampled media. \"\"\" #TODO: Improve recognition", "timer window user = fields.Int(required=True, dump_only=True) media = fields.Int(required=True, dump_only=True) seconds = fields.List(fields.Int(),", "= 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY']", "use except Exception as err: return {\"data\":{\"msg\":\"Unable to index media.\"}, \"code\": 500} if", "return {\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath", "Model. 
\"\"\" table = db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine) __table__ = table id =", "'DELETE']) @jwt_required def userLikesApi(mid, uid): \"\"\" Retrieve, add & modify the user likes", "= Media.query.filter_by(sid=song['song_id']).first() if media: print song['song_id'] result = { \"id\": media.id, \"offset\": song['offset_seconds'],", "= Dislikes.query.filter_by(user=uid, media=mid) existingRatings = qresult.first() if request.method == 'GET': if not existingRatings:", "if request.method == 'GET': media_list = Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list) return jsonify(data), 200", "import SQLAlchemy from sqlalchemy.dialects.mysql import MEDIUMINT from marshmallow import Schema, fields, ValidationError, pre_dump", "array before passing it to dump(). \"\"\" data.seconds = json.loads(data.seconds) return data user_schema", "not found.\"}, \"code\": 404} return {\"data\":result, \"code\": 200} @app.route('/hello', methods=['GET']) @jwt_required def helloApi():", "= pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask = recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}), 202 abort(400, \"Bad", "required=True, validate=emptyLikesValidator) @pre_dump def process_json(self, data): \"\"\" Convert json string to array before", "fingerprintStatusApi(sid): \"\"\" Retrieve the status of a fingerprinting task. 
\"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid)", "media_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 asynctask = fingerprintMedia.delay(data) #TODO: Ensure", "import MEDIUMINT from marshmallow import Schema, fields, ValidationError, pre_dump from flask_jwt_extended import create_access_token,", "if likes: qresult = Likes.query.filter_by(user=uid, media=mid) else: qresult = Dislikes.query.filter_by(user=uid, media=mid) existingRatings =", "always recieves task b4 returning return jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def", "for media.\") else: #create if likes: newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings", "{\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath =", "return jsonify(err.messages), 400 if request.method == 'POST': if existingRatings: abort(409, \"User ratings exists", "if media: print song['song_id'] result = { \"id\": media.id, \"offset\": song['offset_seconds'], \"duration\": media.duration,", "\"\"\" if request.method == 'GET': media_list = Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list) return jsonify(data),", "ValidationError as err: return jsonify(err.messages), 400 user = Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user) token", "from os import path as pth from pytube import YouTube from pydub import", "== 0: raise ValidationError('Seconds cannot be empty.') class UserSchema(Schema): \"\"\" User serialization/deserialization schema.", "media = Media.query.filter_by(sid=song['song_id']).first() if media: print song['song_id'] result = { \"id\": media.id, \"offset\":", "\"\"\" Installation test. 
\"\"\" asynctask = testInstall.apply() if asynctask.ready() and asynctask.successful(): return jsonify({\"msg\":", "request.method == 'POST': if existingRatings: abort(409, \"User ratings exists for media.\") else: #create", "recieves task b4 returning return jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def fingerprintStatusApi(sid):", "model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) signature = db.Column(db.String(255), primary_key=True) def __init__(self,", "self.user = user self.media = media self.seconds = seconds db.create_all() # marshmallow schemas", "ValidationError as err: return jsonify(err.messages), 400 if request.method == 'POST': if existingRatings: abort(409,", "= '.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS = set(['mp3', 'webm', '3gp',", "MediaSchema(Schema): \"\"\" Media serialization/deserialization schema. \"\"\" id = fields.Int(required=True, dump_only=True) name = fields.Str(required=True)", "installation. \"\"\" return \"Hello \" + get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self, media): \"\"\" Fingerprint", "= LikesDislikesSchema(many=True) def allowed_file(filename): return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "from dejavu import Dejavu from dejavu.recognize import FileRecognizer from flask import Flask, request,", "Retrieve, add & modify the user likes for a particular media. \"\"\" try:", "stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream = stream_list[i] break; if stream == None: return {\"data\":{\"msg\":\"Media", "data[\"seconds\"]}), 201 elif request.method == 'PUT': if not existingRatings: abort(404, \"Ratings not found.\")", "not request.is_json or request.get_json() == None: abort(400, \"Json data not provided.\") json_data =", "schema. 
\"\"\" #Discard seconds out of timer window user = fields.Int(required=True, dump_only=True) media", "Media.query.filter_by(sid=song['song_id']).first() if media: print song['song_id'] result = { \"id\": media.id, \"offset\": song['offset_seconds'], \"duration\":", "Boolean indicator if media is indexed. \"\"\" return not media.sid == None class", "pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask = recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}), 202 abort(400, \"Bad request\")", "abort(404, \"Ratings not found.\") qresult.delete() db.session.commit() return jsonify({\"success\": True}) else: if not request.is_json", "try: song = djv.recognize(FileRecognizer, filepath) media = Media.query.filter_by(sid=song['song_id']).first() if media: print song['song_id'] result", "= MediaSchema() media_list_schema = MediaSchema(many=True) user_likes_schema = LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True) def allowed_file(filename):", "return jsonify({\"uuid\": asynctask.task_id}), 202 abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def recognitionStatusApi(sid): \"\"\"", "empty. \"\"\" if not data or len(data) == 0: raise ValidationError('Seconds cannot be", "task. 
\"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that don't exist if fingerprinter.ready():", "Flask, request, abort, jsonify from werkzeug.utils import secure_filename from celery import Celery, states", "try: with open(configpath) as f: config = json.load(f) except IOError as err: print(\"Cannot", "if (pth.isdir(TMP_UPLOAD_FOLDER) == False): print \"Creating upload folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models class", "abort(409, \"User ratings exists for media.\") else: #create if likes: newRatings = Likes(user=uid,", "row = Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row) return {\"data\": media_schema.dump(row),", "\"\"\" #TODO: Improve recognition if 'file' not in request.files: abort(400, \"No file.\") file", "#fingerprint try: yt = YouTube(url) except Exception as err: return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\":", "\"\"\" if not request.is_json or request.get_json() == None: abort(400, \"Json data not provided.\")", "for i in xrange(0, len(stream_list)): if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream = stream_list[i] break;", "UserSchema(Schema): \"\"\" User serialization/deserialization schema. \"\"\" signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator) class MediaSchema(Schema):", "user = Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user) token = create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token})", "err: return jsonify(err.messages), 400 asynctask = fingerprintMedia.delay(data) #TODO: Ensure celery always recieves task", "'.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS #TODO: Increase no. 
workers @clry.task", "{\"data\":{\"msg\":\"Media not found.\"}, \"code\": 404} return {\"data\":result, \"code\": 200} @app.route('/hello', methods=['GET']) @jwt_required def", "app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True", "found.\") except Exception as e: abort(404, \"Ratings not found.\") likes = (str(request.url_rule).split(\"/\")[-1] ==", "201 elif request.method == 'PUT': if not existingRatings: abort(404, \"Ratings not found.\") else:", "(str(err))) return config config = init_config(\"CONFIG.json\") app = Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL']", "test. \"\"\" asynctask = testInstall.apply() if asynctask.ready() and asynctask.successful(): return jsonify({\"msg\": \"Success!\"}) abort(\"Bad", "'3gp', 'ogg']) MEDIA_TYPES = ['television', 'movie', 'music'] def init_config(configpath): \"\"\" Load config from", "string to array before passing it to dump(). \"\"\" data.seconds = json.loads(data.seconds) return", "asynctask = recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}), 202 abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required", "abort(400, \"Json data not provided.\") json_data = request.get_json() try: data = media_schema.load(json_data) except", "= user self.media = media self.seconds = seconds db.create_all() # marshmallow schemas def", "YouTube(url) except Exception as err: return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500} media['duration'] = int(yt.length)", "in ALLOWED_EXTENSIONS #TODO: Increase no. workers @clry.task def testInstall(): \"\"\" Test installation. 
\"\"\"", "import secure_filename from celery import Celery, states from celery.exceptions import Ignore from flask_sqlalchemy", "db.create_all() # marshmallow schemas def userSignatureValidator(data): \"\"\" Validate user signature. \"\"\" user =", "if asynctask.ready() and asynctask.successful(): return jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\", 500) @app.route('/register', methods=['POST']) def", "name self.duration = duration self.author = author self.mtype = mtype self.sid = sid", "broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create upload folders on app load if (pth.isdir(TMP_UPLOAD_FOLDER) == False):", "\"\"\" Retrieve list of user likes for a media. \"\"\" try: if Media.query.get(mid)", "= '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS = set(['mp3', 'webm', '3gp', 'ogg']) MEDIA_TYPES =", "request.method == 'GET': if not existingRatings: return jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif request.method ==", "check_indexed(self, media): \"\"\" Return Boolean indicator if media is indexed. \"\"\" return not", "author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row) return {\"data\": media_schema.dump(row), \"code\": 201} @clry.task(bind=True) def", "#os.remove(filepath) # rmv file after use except Exception as err: return {\"data\":{\"msg\":\"Unable to", "not song: return {\"data\":{\"msg\":\"Media not found.\"}, \"code\": 404} return {\"data\":result, \"code\": 200} @app.route('/hello',", "\"\"\" Add a user to the database. 
\"\"\" if not request.is_json or request.get_json()", "secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask = recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}), 202", "found.\") qresult.delete() db.session.commit() return jsonify({\"success\": True}) else: if not request.is_json or request.get_json() ==", "file.save(filepath) asynctask = recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}), 202 abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET'])", "return not media.sid == None class LikesDislikesSchema(Schema): \"\"\" Likes & dislikes serialization/deserialization schema.", "import Dejavu from dejavu.recognize import FileRecognizer from flask import Flask, request, abort, jsonify", "db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds", "filename) file.save(filepath) asynctask = recognizeMedia.delay(filepath) return jsonify({\"uuid\": asynctask.task_id}), 202 abort(400, \"Bad request\") @app.route('/media/recognize/status/<uuid:sid>',", "db.session.add(newRatings) db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}), 201 elif request.method ==", "to index media.\"}, \"code\": 500} if sid <= 0: return {\"data\":{\"msg\":\"Media already exists.\"},", "media): \"\"\" Return Boolean indicator if media is indexed. 
\"\"\" return not media.sid", "\"No selected file\") if file and allowed_file(file.filename): filename = secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'],", "as f: config = json.load(f) except IOError as err: print(\"Cannot open configuration: %s.", "raise Exception except Exception as e: abort(401) try: if Media.query.get(mid) == None: raise", "{\"data\":{\"msg\":\"Media already exists.\"}, \"code\": 409} row = Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid) db.session.add(row)", "400 asynctask = fingerprintMedia.delay(data) #TODO: Ensure celery always recieves task b4 returning return", "existingRatings = qresult.first() if request.method == 'GET': if not existingRatings: return jsonify({}) return", "User serialization/deserialization schema. \"\"\" signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator) class MediaSchema(Schema): \"\"\" Media", "Ignore from flask_sqlalchemy import SQLAlchemy from sqlalchemy.dialects.mysql import MEDIUMINT from marshmallow import Schema,", "class Dislikes(db.Model): \"\"\" Dislikes model. \"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int", "filepath): #TODO: Use sth better than filenames result = {} try: song =", "e: abort(401) try: if Media.query.get(mid) == None: raise Exception except Exception as e:", "not media.sid == None class LikesDislikesSchema(Schema): \"\"\" Likes & dislikes serialization/deserialization schema. \"\"\"", "SQLAlchemy(app) djv = Dejavu(config) jwt = JWTManager(app) clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config)", "ValidationError('Mtype is invalid.') def emptyLikesValidator(data): \"\"\" Ensure likes is not empty. 
\"\"\" if", "djv.fingerprint_file(filepath) #os.remove(filepath) # rmv file after use except Exception as err: return {\"data\":{\"msg\":\"Unable", "\"\"\" Test installation. \"\"\" return \"Hello \" + get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self, media):", "if not request.is_json or request.get_json() == None: abort(400, \"Json data not provided.\") json_data", "than filenames result = {} try: song = djv.recognize(FileRecognizer, filepath) media = Media.query.filter_by(sid=song['song_id']).first()", "the media mid. \"\"\" media = Media.query.get(mid) if not media: abort(404, \"Media not", "DOWNLOAD_AUDIO_FORMAT: stream = stream_list[i] break; if stream == None: return {\"data\":{\"msg\":\"Media stream unavailable.\"},", "= djv.recognize(FileRecognizer, filepath) media = Media.query.filter_by(sid=song['song_id']).first() if media: print song['song_id'] result = {", "likes: newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings)", "configuration: %s. Exiting\" % (str(err))) return config config = init_config(\"CONFIG.json\") app = Flask(__name__)", "= mtype self.sid = sid class Likes(db.Model): \"\"\" Likes model. \"\"\" media =", "as e: return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500} if not song: return {\"data\":{\"msg\":\"Media not", "given media. \"\"\" url = media.get(\"url\", None) sid = None if url !=", "data): \"\"\" Convert json string to array before passing it to dump(). \"\"\"", "return {\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500} media['duration'] = int(yt.length) stream_list = yt.streams.filter(only_audio=True).all() stream =", "media is indexed. 
\"\"\" return not media.sid == None class LikesDislikesSchema(Schema): \"\"\" Likes", "db.Column(db.String(255), nullable=False) sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self, name, duration, author, mtype, sid):", "class UserSchema(Schema): \"\"\" User serialization/deserialization schema. \"\"\" signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator) class", "as e: abort(401) try: if Media.query.get(mid) == None: raise Exception except Exception as", "= fields.Int(required=True, dump_only=True) seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump def process_json(self, data): \"\"\"", "202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def fingerprintStatusApi(sid): \"\"\" Retrieve the status of a fingerprinting", "= media_schema.load(json_data) except ValidationError as err: return jsonify(err.messages), 400 asynctask = fingerprintMedia.delay(data) #TODO:", "likes for a particular media. \"\"\" try: user = Users.query.filter_by(signature=get_jwt_identity()).first() if user ==", "= fields.Str(required=True, load_only=True, validate=userSignatureValidator) class MediaSchema(Schema): \"\"\" Media serialization/deserialization schema. \"\"\" id =", "signature.') def mediaTypeValidator(data): \"\"\" Validate media type. \"\"\" if data and data.lower() not", "abort(401) try: if Media.query.get(mid) == None: raise Exception except Exception as e: abort(404,", "\"\"\" Initialize class. 
\"\"\" self.user = user self.media = media self.seconds = seconds", "methods=['GET', 'POST', 'PUT', 'DELETE']) @jwt_required def userLikesApi(mid, uid): \"\"\" Retrieve, add & modify", "'ogg']) MEDIA_TYPES = ['television', 'movie', 'music'] def init_config(configpath): \"\"\" Load config from a", "if not rating: jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET',", "e: abort(404, \"Ratings not found.\") likes = (str(request.url_rule).split(\"/\")[-1] == \"likes\") if likes: rating", "{\"data\":{\"msg\":\"Media unavailable.\"}, \"code\": 500} media['duration'] = int(yt.length) stream_list = yt.streams.filter(only_audio=True).all() stream = None", "result = { \"id\": media.id, \"offset\": song['offset_seconds'], \"duration\": media.duration, \"match_time\": song['match_time'] } except", "Media serialization/deserialization schema. \"\"\" id = fields.Int(required=True, dump_only=True) name = fields.Str(required=True) author =", "dejavu.recognize import FileRecognizer from flask import Flask, request, abort, jsonify from werkzeug.utils import", "'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @jwt_required def userLikesApi(mid, uid): \"\"\"", "'GET': if not existingRatings: return jsonify({}) return jsonify(user_likes_schema.dump(existingRatings)) elif request.method == 'DELETE': if", "newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\":", "\"\"\" try: if Media.query.get(mid) == None: abort(404, \"Media not found.\") except Exception as", "\"\"\" Retrieve the details for the media mid. 
\"\"\" media = Media.query.get(mid) if", "None: abort(400, \"Json data not provided.\") json_data = request.get_json() try: data = user_schema.load(json_data)", "MEDIA_TYPES: raise ValidationError('Mtype is invalid.') def emptyLikesValidator(data): \"\"\" Ensure likes is not empty.", "\"match_time\": song['match_time'] } except Exception as e: return {\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500} if", "user_likes_schema = LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True) def allowed_file(filename): return '.' in filename and", "filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS #TODO: Increase no. workers @clry.task def testInstall(): \"\"\" Test", "to the database. \"\"\" if not request.is_json or request.get_json() == None: abort(400, \"Json", "== None or user.id != uid: raise Exception except Exception as e: abort(401)", "recognizer.failed(): return abort(500, \"Error recognizing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>/likes', methods=['GET']) @app.route('/media/<int:mid>/dislikes',", "\"Ratings not found.\") qresult.delete() db.session.commit() return jsonify({\"success\": True}) else: if not request.is_json or", "def mediaApi(): \"\"\" Add & retrieve media. \"\"\" if request.method == 'GET': media_list", "\"code\": 404} return {\"data\":result, \"code\": 200} @app.route('/hello', methods=['GET']) @jwt_required def helloApi(): \"\"\" Installation", "likes for a media. \"\"\" try: if Media.query.get(mid) == None: abort(404, \"Media not", "\"Json data not provided.\") json_data = request.get_json() try: data = user_likes_schema.load(json_data) except ValidationError", "db.session.add(row) db.session.commit() db.session.refresh(row) return {\"data\": media_schema.dump(row), \"code\": 201} @clry.task(bind=True) def recognizeMedia(self, filepath): #TODO:", "to a db Model. 
\"\"\" table = db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine) __table__ =", "try: filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid = djv.fingerprint_file(filepath) #os.remove(filepath) # rmv file after use", "exist if fingerprinter.ready(): if fingerprinter.successful(): result = fingerprinter.get() return jsonify(result['data']), result['code'] if fingerprinter.failed():", "{\"data\":{\"msg\":\"Recognition failed.\"}, \"code\": 500} if not song: return {\"data\":{\"msg\":\"Media not found.\"}, \"code\": 404}", "if request.method == 'POST': if existingRatings: abort(409, \"User ratings exists for media.\") else:", "as e: abort(404, \"Media not found.\") likes = (str(request.url_rule).split(\"/\")[-2] == \"likes\") if likes:", "def userLikesApi(mid, uid): \"\"\" Retrieve, add & modify the user likes for a", "schema. \"\"\" signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator) class MediaSchema(Schema): \"\"\" Media serialization/deserialization schema.", "no. workers @clry.task def testInstall(): \"\"\" Test installation. \"\"\" return \"Hello \" +", "media.sid == None class LikesDislikesSchema(Schema): \"\"\" Likes & dislikes serialization/deserialization schema. \"\"\" #Discard", "add a given media. \"\"\" url = media.get(\"url\", None) sid = None if", "a fingerprinting task. \"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that don't exist", "of a sampled media. \"\"\" #TODO: Improve recognition if 'file' not in request.files:", "methods=['POST']) @jwt_required def mediaRecognitionApi(): \"\"\" Retrieve the resource id, name, author and time", "== '': abort(400, \"No selected file\") if file and allowed_file(file.filename): filename = secure_filename(file.filename)", "import json from os import path as pth from pytube import YouTube from", "table to a db Model. 
\"\"\" table = db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine) __table__", "TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'],", "of a recognition activity. \"\"\" recognizer = recognizeMedia.AsyncResult(sid) if recognizer.ready(): if recognizer.successful(): result", "<= 0: return {\"data\":{\"msg\":\"Media already exists.\"}, \"code\": 409} row = Media(name=media['name'], duration=media['duration'], author=media['author'],", "media=mid) else: qresult = Dislikes.query.filter_by(user=uid, media=mid) existingRatings = qresult.first() if request.method == 'GET':", "unavailable.\"}, \"code\": 500} media['duration'] = int(yt.length) stream_list = yt.streams.filter(only_audio=True).all() stream = None for", "MEDIA_TYPES = ['television', 'movie', 'music'] def init_config(configpath): \"\"\" Load config from a JSON", "\"\"\" Users model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) signature = db.Column(db.String(255), primary_key=True)", "\"\"\" table = db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine) __table__ = table id = table.c.song_id", "\"\"\" url = media.get(\"url\", None) sid = None if url != None: #fingerprint", "= media self.seconds = seconds class Dislikes(db.Model): \"\"\" Dislikes model. \"\"\" media =", "Users model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) signature = db.Column(db.String(255), primary_key=True) def", "of a fingerprinting task. 
\"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that don't", "= LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True) def allowed_file(filename): return '.' in filename and filename.rsplit('.',", "songs table to a db Model. \"\"\" table = db.Table(\"songs\", db.metadata, autoload=True, autoload_with=db.engine)", "not found.\") except Exception as e: abort(404, \"Ratings not found.\") likes = (str(request.url_rule).split(\"/\")[-1]", "\" + get_jwt_identity() @clry.task(bind=True) def fingerprintMedia(self, media): \"\"\" Fingerprint and add a given", "the resource id, name, author and time index of a sampled media. \"\"\"", "rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating: jsonify([]) return jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT',", "LikesDislikesSchema(Schema): \"\"\" Likes & dislikes serialization/deserialization schema. \"\"\" #Discard seconds out of timer", "return config config = init_config(\"CONFIG.json\") app = Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] =", "mediaItemApi(mid): \"\"\" Retrieve the details for the media mid. \"\"\" media = Media.query.get(mid)", "data or len(data) == 0: raise ValidationError('Seconds cannot be empty.') class UserSchema(Schema): \"\"\"", "\"\"\" try: with open(configpath) as f: config = json.load(f) except IOError as err:", "yt.streams.filter(only_audio=True).all() stream = None for i in xrange(0, len(stream_list)): if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT:", "return jsonify(user_likes_schema.dump(existingRatings)) elif request.method == 'DELETE': if not existingRatings: abort(404, \"Ratings not found.\")", "Celery, states from celery.exceptions import Ignore from flask_sqlalchemy import SQLAlchemy from sqlalchemy.dialects.mysql import", "Likes(db.Model): \"\"\" Likes model. 
\"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary", "ValidationError as err: return jsonify(err.messages), 400 asynctask = fingerprintMedia.delay(data) #TODO: Ensure celery always", "= {} try: song = djv.recognize(FileRecognizer, filepath) media = Media.query.filter_by(sid=song['song_id']).first() if media: print", "media_list_schema.dump(media_list) return jsonify(data), 200 elif request.method == 'POST': if not request.is_json or request.get_json()", "db.metadata, autoload=True, autoload_with=db.engine) __table__ = table id = table.c.song_id name = table.c.song_name class", "return abort(500, \"Error indexing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid):", "Initialize class. \"\"\" self.user = user self.media = media self.seconds = seconds db.create_all()", "abort(\"Bad installation\", 500) @app.route('/register', methods=['POST']) def registerApi(): \"\"\" Add a user to the", "not provided.\") json_data = request.get_json() try: data = media_schema.load(json_data) except ValidationError as err:", "JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER = '.tmp-upload/' DOWNLOAD_AUDIO_FORMAT = 'audio/webm' ALLOWED_EXTENSIONS = set(['mp3',", "primary_key=True) #circumvent int primary key req user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds =", "jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}), 201 elif request.method == 'PUT': if not", "= 'super-secret' #TODO: Generate using os db = SQLAlchemy(app) djv = Dejavu(config) jwt", "db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)", "@app.route('/register', methods=['POST']) def registerApi(): \"\"\" Add a user to the database. 
\"\"\" if", "= seconds db.create_all() # marshmallow schemas def userSignatureValidator(data): \"\"\" Validate user signature. \"\"\"", "'PUT': if not existingRatings: abort(404, \"Ratings not found.\") else: #modify existingRatings.seconds = json.dumps(data[\"seconds\"])", "autoincrement=True) name = db.Column(db.String(255), nullable=False) duration = db.Column(db.BigInteger, nullable=False) author = db.Column(db.String(255), nullable=False)", "it to dump(). \"\"\" data.seconds = json.loads(data.seconds) return data user_schema = UserSchema() media_schema", "media=mid, seconds=json.dumps(data[\"seconds\"])) db.session.add(newRatings) db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}), 201 elif", "\"\"\" try: user = Users.query.filter_by(signature=get_jwt_identity()).first() if user == None or user.id != uid:", "media. \"\"\" try: if Media.query.get(mid) == None: abort(404, \"Media not found.\") except Exception", "= fields.Str(required=True, validate=mediaTypeValidator) url = fields.Url(load_only=True) indexed = fields.Method('check_indexed', dump_only=True) def check_indexed(self, media):", "print song['song_id'] result = { \"id\": media.id, \"offset\": song['offset_seconds'], \"duration\": media.duration, \"match_time\": song['match_time']", "\"\"\" id = fields.Int(required=True, dump_only=True) name = fields.Str(required=True) author = fields.Str(required=True) duration =", "try: if Media.query.get(mid) == None: raise Exception except Exception as e: abort(404, \"Media", "def registerApi(): \"\"\" Add a user to the database. \"\"\" if not request.is_json", "= media_list_schema.dump(media_list) return jsonify(data), 200 elif request.method == 'POST': if not request.is_json or", "Media model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(255), nullable=False) duration", "mediaLikesApi(mid): \"\"\" Retrieve list of user likes for a media. 
\"\"\" try: if", "from flask import Flask, request, abort, jsonify from werkzeug.utils import secure_filename from celery", "@jwt_required def mediaRecognitionApi(): \"\"\" Retrieve the resource id, name, author and time index", "data and data.lower() not in MEDIA_TYPES: raise ValidationError('Mtype is invalid.') def emptyLikesValidator(data): \"\"\"", "else: qresult = Dislikes.query.filter_by(user=uid, media=mid) existingRatings = qresult.first() if request.method == 'GET': if", "and data.lower() not in MEDIA_TYPES: raise ValidationError('Mtype is invalid.') def emptyLikesValidator(data): \"\"\" Ensure", "= name self.duration = duration self.author = author self.mtype = mtype self.sid =", "sid=sid) db.session.add(row) db.session.commit() db.session.refresh(row) return {\"data\": media_schema.dump(row), \"code\": 201} @clry.task(bind=True) def recognizeMedia(self, filepath):", "@jwt_required def helloApi(): \"\"\" Installation test. \"\"\" asynctask = testInstall.apply() if asynctask.ready() and", "media['duration'] = int(yt.length) stream_list = yt.streams.filter(only_audio=True).all() stream = None for i in xrange(0,", "= fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that don't exist if fingerprinter.ready(): if fingerprinter.successful(): result", "seconds class Dislikes(db.Model): \"\"\" Dislikes model. 
\"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent", "djv = Dejavu(config) jwt = JWTManager(app) clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) #", "request\") @app.route('/media/recognize/status/<uuid:sid>', methods=['GET']) @jwt_required def recognitionStatusApi(sid): \"\"\" Retieve the status of a recognition", "Exception as e: abort(404, \"Media not found.\") likes = (str(request.url_rule).split(\"/\")[-2] == \"likes\") if", "return {\"data\":{\"msg\":\"Media already exists.\"}, \"code\": 409} row = Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid)", "abort(400, \"No file.\") file = request.files['file'] if file.filename == '': abort(400, \"No selected", "allowed_file(file.filename): filename = secure_filename(file.filename) filepath = pth.join(app.config['UPLOAD_FOLDER'], filename) file.save(filepath) asynctask = recognizeMedia.delay(filepath) return", "& modify the user likes for a particular media. \"\"\" try: user =", "user != None: raise ValidationError('Please provide another signature.') def mediaTypeValidator(data): \"\"\" Validate media", "missing=0) mtype = fields.Str(required=True, validate=mediaTypeValidator) url = fields.Url(load_only=True) indexed = fields.Method('check_indexed', dump_only=True) def", "folder\" os.mkdir(TMP_UPLOAD_FOLDER) # SQLAlchemy models class Users(db.Model): \"\"\" Users model. \"\"\" id =", "nullable=False) author = db.Column(db.String(255), nullable=False) mtype = db.Column(db.String(255), nullable=False) sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id'))", "Use sth better than filenames result = {} try: song = djv.recognize(FileRecognizer, filepath)", "is indexed. 
\"\"\" return not media.sid == None class LikesDislikesSchema(Schema): \"\"\" Likes &", "provide another signature.') def mediaTypeValidator(data): \"\"\" Validate media type. \"\"\" if data and", "if likes: newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"]))", "self.seconds = seconds db.create_all() # marshmallow schemas def userSignatureValidator(data): \"\"\" Validate user signature.", "cannot be empty.') class UserSchema(Schema): \"\"\" User serialization/deserialization schema. \"\"\" signature = fields.Str(required=True,", "!= None: raise ValidationError('Please provide another signature.') def mediaTypeValidator(data): \"\"\" Validate media type.", "fields.Str(required=True) duration = fields.Int(default=0, missing=0) mtype = fields.Str(required=True, validate=mediaTypeValidator) url = fields.Url(load_only=True) indexed", "autoload_with=db.engine) __table__ = table id = table.c.song_id name = table.c.song_name class Media(db.Model): \"\"\"", "\"likes\") if likes: rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all() else: rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all() if not rating:", "LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True) def allowed_file(filename): return '.' 
in filename and filename.rsplit('.', 1)[1].lower()", "jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>', methods=['GET']) @jwt_required def fingerprintStatusApi(sid): \"\"\" Retrieve the status of", "app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host']) app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate", "user = Users.query.filter_by(signature=get_jwt_identity()).first() if user == None or user.id != uid: raise Exception", "# SQLAlchemy models class Users(db.Model): \"\"\" Users model. \"\"\" id = db.Column(db.Integer, primary_key=True,", "#create if likes: newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data[\"seconds\"])) else: newRatings = Dislikes(user=uid, media=mid,", "Dislikes model. \"\"\" media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req", "class Media(db.Model): \"\"\" Media model. \"\"\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) name =", "primary key req user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True) seconds = db.Column(db.JSON, nullable=False) def", "\"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath = stream.download(TMP_DOWNLOAD_FOLDER) sid =", "media. \"\"\" if request.method == 'GET': media_list = Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list) return", "recognition activity. 
\"\"\" recognizer = recognizeMedia.AsyncResult(sid) if recognizer.ready(): if recognizer.successful(): result = recognizer.get()", "= Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL']) clry.conf.update(app.config) # create upload folders on app load if", "= yt.streams.filter(only_audio=True).all() stream = None for i in xrange(0, len(stream_list)): if stream_list[i].mime_type ==", "None: #fingerprint try: yt = YouTube(url) except Exception as err: return {\"data\":{\"msg\":\"Media unavailable.\"},", "MEDIUMINT from marshmallow import Schema, fields, ValidationError, pre_dump from flask_jwt_extended import create_access_token, get_jwt_identity,", "== None: raise Exception except Exception as e: abort(404, \"Media not found.\") likes", "init_config(\"CONFIG.json\") app = Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd']) app.config['CELERY_RESULT_BACKEND'] =", "stream unavailable.\"}, \"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False): os.mkdir(TMP_DOWNLOAD_FOLDER) try: filepath = stream.download(TMP_DOWNLOAD_FOLDER)", "else: #modify existingRatings.seconds = json.dumps(data[\"seconds\"]) db.session.commit() return jsonify({\"user\": uid, \"media\": mid, \"seconds\": data[\"seconds\"]}),", "@app.route('/media/<int:mid>', methods=['GET']) def mediaItemApi(mid): \"\"\" Retrieve the details for the media mid. \"\"\"", "config from a JSON file \"\"\" try: with open(configpath) as f: config =", "config config = init_config(\"CONFIG.json\") app = Flask(__name__) app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'],", "class LikesDislikesSchema(Schema): \"\"\" Likes & dislikes serialization/deserialization schema. 
\"\"\" #Discard seconds out of", "#TODO: Handle sids that don't exist if fingerprinter.ready(): if fingerprinter.successful(): result = fingerprinter.get()", "if recognizer.successful(): result = recognizer.get() return jsonify(result['data']), result['code'] if recognizer.failed(): return abort(500, \"Error", "serialization/deserialization schema. \"\"\" id = fields.Int(required=True, dump_only=True) name = fields.Str(required=True) author = fields.Str(required=True)", "or len(data) == 0: raise ValidationError('Seconds cannot be empty.') class UserSchema(Schema): \"\"\" User", "except ValidationError as err: return jsonify(err.messages), 400 asynctask = fingerprintMedia.delay(data) #TODO: Ensure celery", "and time index of a sampled media. \"\"\" #TODO: Improve recognition if 'file'", "if 'file' not in request.files: abort(400, \"No file.\") file = request.files['file'] if file.filename", "\"\"\" Retrieve the status of a fingerprinting task. \"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO:", "\"\"\" return not media.sid == None class LikesDislikesSchema(Schema): \"\"\" Likes & dislikes serialization/deserialization", "= MediaSchema(many=True) user_likes_schema = LikesDislikesSchema() media_likes_schema = LikesDislikesSchema(many=True) def allowed_file(filename): return '.' in", "db.session.refresh(user) token = create_access_token(identity=data['signature'], expires_delta=False) return jsonify({\"uid\":user.id, \"access_token\":token}) @app.route('/media', methods=['GET','POST']) @jwt_required def mediaApi():", "def emptyLikesValidator(data): \"\"\" Ensure likes is not empty. 
\"\"\" if not data or", "AudioSegment from dejavu import Dejavu from dejavu.recognize import FileRecognizer from flask import Flask,", "fields.Int(required=True, dump_only=True) media = fields.Int(required=True, dump_only=True) seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator) @pre_dump def", "elif request.method == 'POST': if not request.is_json or request.get_json() == None: abort(400, \"Json", "jsonify(err.messages), 400 user = Users(signature=data['signature']) db.session.add(user) db.session.commit() db.session.refresh(user) token = create_access_token(identity=data['signature'], expires_delta=False) return", "signature = db.Column(db.String(255), primary_key=True) def __init__(self, signature): \"\"\" Initialize class. \"\"\" self.signature =", "media.id, \"offset\": song['offset_seconds'], \"duration\": media.duration, \"match_time\": song['match_time'] } except Exception as e: return", "@jwt_required def mediaApi(): \"\"\" Add & retrieve media. \"\"\" if request.method == 'GET':", "ALLOWED_EXTENSIONS = set(['mp3', 'webm', '3gp', 'ogg']) MEDIA_TYPES = ['television', 'movie', 'music'] def init_config(configpath):", "autoincrement=True) signature = db.Column(db.String(255), primary_key=True) def __init__(self, signature): \"\"\" Initialize class. \"\"\" self.signature", "import YouTube from pydub import AudioSegment from dejavu import Dejavu from dejavu.recognize import", "media_list = Media.query.order_by(Media.name).all() data = media_list_schema.dump(media_list) return jsonify(data), 200 elif request.method == 'POST':", "= json.load(f) except IOError as err: print(\"Cannot open configuration: %s. 
Exiting\" % (str(err)))", "\"seconds\": data[\"seconds\"]}), 201 elif request.method == 'PUT': if not existingRatings: abort(404, \"Ratings not", "= Users.query.filter_by(signature=data).first() if user != None: raise ValidationError('Please provide another signature.') def mediaTypeValidator(data):", "if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT: stream = stream_list[i] break; if stream == None: return", "#TODO: Ensure celery always recieves task b4 returning return jsonify({\"uuid\": asynctask.task_id}), 202 @app.route('/media/status/<uuid:sid>',", "fields.Method('check_indexed', dump_only=True) def check_indexed(self, media): \"\"\" Return Boolean indicator if media is indexed.", "jsonify({\"msg\": \"Success!\"}) abort(\"Bad installation\", 500) @app.route('/register', methods=['POST']) def registerApi(): \"\"\" Add a user", "db.Column(db.String(255), nullable=False) mtype = db.Column(db.String(255), nullable=False) sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id')) def __init__(self, name,", "a particular media. \"\"\" try: user = Users.query.filter_by(signature=get_jwt_identity()).first() if user == None or", "Initialize class. \"\"\" self.name = name self.duration = duration self.author = author self.mtype", "failed.\"}, \"code\": 500} if not song: return {\"data\":{\"msg\":\"Media not found.\"}, \"code\": 404} return", "djv.recognize(FileRecognizer, filepath) media = Media.query.filter_by(sid=song['song_id']).first() if media: print song['song_id'] result = { \"id\":", "result['code'] if fingerprinter.failed(): return abort(500, \"Error indexing media.\") return jsonify({\"uuid\": str(sid)}), 202 @app.route('/media/<int:mid>',", "\"\"\" Likes & dislikes serialization/deserialization schema. \"\"\" #Discard seconds out of timer window", "schemas def userSignatureValidator(data): \"\"\" Validate user signature. 
\"\"\" user = Users.query.filter_by(signature=data).first() if user", "jsonify(media_likes_schema.dump(rating)) @app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE']) @jwt_required def", "duration = db.Column(db.BigInteger, nullable=False) author = db.Column(db.String(255), nullable=False) mtype = db.Column(db.String(255), nullable=False) sid", "0: raise ValidationError('Seconds cannot be empty.') class UserSchema(Schema): \"\"\" User serialization/deserialization schema. \"\"\"", "\"Success!\"}) abort(\"Bad installation\", 500) @app.route('/register', methods=['POST']) def registerApi(): \"\"\" Add a user to", "table id = table.c.song_id name = table.c.song_name class Media(db.Model): \"\"\" Media model. \"\"\"", "id, name, author and time index of a sampled media. \"\"\" #TODO: Improve", "time index of a sampled media. \"\"\" #TODO: Improve recognition if 'file' not", "pre_dump from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/' TMP_UPLOAD_FOLDER =", "fingerprintMedia(self, media): \"\"\" Fingerprint and add a given media. \"\"\" url = media.get(\"url\",", "recognition if 'file' not in request.files: abort(400, \"No file.\") file = request.files['file'] if", "data not provided.\") json_data = request.get_json() try: data = user_schema.load(json_data) except ValidationError as", "status of a fingerprinting task. 
\"\"\" fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that", "= db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(255), nullable=False) duration = db.Column(db.BigInteger, nullable=False) author", "\"code\": 201} @clry.task(bind=True) def recognizeMedia(self, filepath): #TODO: Use sth better than filenames result", "'POST': if not request.is_json or request.get_json() == None: abort(400, \"Json data not provided.\")", "fields, ValidationError, pre_dump from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, JWTManager TMP_DOWNLOAD_FOLDER = '.tmp-download/'", "Media.query.get(mid) == None: abort(404, \"Media not found.\") except Exception as e: abort(404, \"Ratings", "url != None: #fingerprint try: yt = YouTube(url) except Exception as err: return", "if stream == None: return {\"data\":{\"msg\":\"Media stream unavailable.\"}, \"code\": 500} if (pth.isdir(TMP_DOWNLOAD_FOLDER) ==", "if Media.query.get(mid) == None: raise Exception except Exception as e: abort(404, \"Media not", "db.Column(db.BigInteger, nullable=False) author = db.Column(db.String(255), nullable=False) mtype = db.Column(db.String(255), nullable=False) sid = db.Column(MEDIUMINT(unsigned=True),", "== None: abort(404, \"Media not found.\") except Exception as e: abort(404, \"Ratings not", "= fields.Str(required=True) duration = fields.Int(default=0, missing=0) mtype = fields.Str(required=True, validate=mediaTypeValidator) url = fields.Url(load_only=True)" ]
[ "verbose_name=_(u'to node type'), related_name=\"to_node_type_set_for_%(class)s\") toNode_pk = models.TextField(_(u'toNode ID')) toNode = generic.GenericForeignKey(ct_field=\"toNode_type\", fk_field=\"toNode_pk\") #", "django.dispatch import receiver from django.utils.translation import ugettext_lazy as _ from .fields import JSONField", "kwargs: try: eta = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in kwargs: try:", "EdgeTypeAssociation(models.Model): direct = models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in') inverse = models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in') objects =", "Cache to avoid re-looking up EdgeTypeAssociation objects all over the place. _cache =", ".fields import JSONField from .consistency_enforcers import * class EdgeTypeManager(models.Manager): # Cache to avoid", "direct = models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in') inverse = models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in') objects = EdgeTypeAssociationManager()", "= super(EdgeTypeAssociationManager, self).get(*args, **kwargs) self._add_to_cache(self.db, eta) return eta def get_for_direct_edge_type(self, et): try: eta", "models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edge_counters') objects = models.Manager() on_site = CurrentSiteManager() def __unicode__(self): return (", "import Site from django.contrib.sites.managers import CurrentSiteManager from django.db import models from django.dispatch import", "['name'] verbose_name = _(u'Edge type') verbose_name_plural = _(u'Edge types') def __unicode__(self): return u'%s'", "EdgeTypeAssociation objects all over the place. 
_cache = {} _direct_cache = {} _inverse_cache", "= self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass if eta is None: eta = super(EdgeTypeAssociationManager, self).get(*args,", "= models.BooleanField(_(u'auto created'), default=False) objects = models.Manager() on_site = CurrentSiteManager() class Meta(object): unique_together", "eta) return eta def get_for_inverse_edge_type(self, et): try: eta = self.__class__._inverse_cache[self.db][et.id] except KeyError: eta", "if eta is None: eta = super(EdgeTypeAssociationManager, self).get(*args, **kwargs) self._add_to_cache(self.db, eta) return eta", "in kwargs: try: eta = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in kwargs:", "= et def rem_from_cache(self, using, et): try: del self.__class__._cache.setdefault(using, {})[et.id] del self.__class__._cache.setdefault(using, {})[et.name]", "self.inverse } def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeTypeAssociation, self).delete(using) class Edge(models.Model): # fromNode", "self.__class__._cache.setdefault(using, {})[eta.id] del self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] del self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] except KeyError: pass def clear_cache(self):", "'direct': self.direct, 'inverse': self.inverse } def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeTypeAssociation, self).delete(using) class", "'to': self.toNode if self.toNode else '' } ) @receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge') def pre_save_handler(instance,", "= self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass elif 'name' in kwargs: try: et = self.__class__._cache[self.db][kwargs['name']]", "( _(u'%(from)s %(verb)s %(to)s') % { 'from': self.fromNode if self.fromNode else '', 'verb':", "None: eta = super(EdgeTypeAssociationManager, self).get(*args, **kwargs) 
self._add_to_cache(self.db, eta) return eta def get_for_direct_edge_type(self, et):", "{})[et.name] except KeyError: pass def clear_cache(self): \"\"\" Clear out the edge-type cache. \"\"\"", "except KeyError: pass elif 'pk' in kwargs: try: et = self.__class__._cache[self.db][kwargs['pk']] except KeyError:", "verbose_name=_(u'site'), related_name='edges') auto = models.BooleanField(_(u'auto created'), default=False) objects = models.Manager() on_site = CurrentSiteManager()", "except KeyError: pass if eta is None: eta = super(EdgeTypeAssociationManager, self).get(*args, **kwargs) self._add_to_cache(self.db,", "= _(u'Edge types') def __unicode__(self): return u'%s' % self.name def setting_name(self): return self.name.upper()", "models.Manager() on_site = CurrentSiteManager() class Meta(object): unique_together = ['fromNode_type', 'fromNode_pk', 'toNode_type', 'toNode_pk', 'type',", "= self.__class__._cache[self.db][kwargs['name']] except KeyError: pass if et is None: et = super(EdgeTypeManager, self).get(*args,", "try: eta = self.__class__._inverse_cache[self.db][et.id] except KeyError: eta = self.get(inverse=et) self._add_to_cache(self.db, eta) return eta", "= self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in kwargs: try: et = self.__class__._cache[self.db][kwargs['pk']]", "eta def rem_from_cache(self, using, eta): try: del self.__class__._cache.setdefault(using, {})[eta.id] del self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] del", "TO ENFORCE GRAPH CONSISTENCY models.signals.post_save.connect( SymmetricEdgeManager.create_symmetric_edge, sender=Edge, dispatch_uid='create_symmetric_edge' ) models.signals.post_delete.connect( SymmetricEdgeManager.delete_symmetric_edge, sender=Edge, dispatch_uid='delete_symmetric_edge'", "get(self, *args, **kwargs): et = None if 'id' in kwargs: try: et =", "in kwargs: try: et = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass elif 'name' in kwargs:", "node type'), 
related_name=\"to_node_type_set_for_%(class)s\") toNode_pk = models.TextField(_(u'toNode ID')) toNode = generic.GenericForeignKey(ct_field=\"toNode_type\", fk_field=\"toNode_pk\") # edge", "fk_field=\"fromNode_pk\") # edge attributes type = models.ForeignKey(EdgeType) # count count = models.IntegerField(_(u'count'), default=0)", "models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # edge attributes type = models.ForeignKey(EdgeType) #", "et is None: et = super(EdgeTypeManager, self).get(*args, **kwargs) self._add_to_cache(self.db, et) return et def", "if self.fromNode else '', 'count': self.count, 'type': self.type } ) class Meta(object): unique_together", "et = None if 'id' in kwargs: try: et = self.__class__._cache[self.db][kwargs['id']] except KeyError:", "return u\"%(direct)s <-> %(inverse)s\" % { 'direct': self.direct, 'inverse': self.inverse } def delete(self,", "self.name def setting_name(self): return self.name.upper() def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeType, self).delete(using) class", "generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # edge attributes type = models.ForeignKey(EdgeType) # count count = models.IntegerField(_(u'count'),", "'id' in kwargs: try: et = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in", "up EdgeTypeAssociation objects all over the place. 
_cache = {} _direct_cache = {}", "= models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edge_counters') objects = models.Manager() on_site = CurrentSiteManager() def __unicode__(self): return", "models.ForeignKey(ContentType, verbose_name=_(u'to node type'), related_name=\"to_node_type_set_for_%(class)s\") toNode_pk = models.TextField(_(u'toNode ID')) toNode = generic.GenericForeignKey(ct_field=\"toNode_type\", fk_field=\"toNode_pk\")", "'site', getattr(instance.toNode, 'site', Site.objects.get_current())) class EdgeCount(models.Model): # fromNode field fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from", "self.fromNode else '', 'verb': self.type.read_as, 'to': self.toNode if self.toNode else '' } )", "del self.__class__._cache.setdefault(using, {})[et.name] except KeyError: pass def clear_cache(self): \"\"\" Clear out the edge-type", "self.__class__.objects.rem_from_cache(using, self) super(EdgeType, self).delete(using) class EdgeTypeAssociationManager(models.Manager): # Cache to avoid re-looking up EdgeTypeAssociation", "def get_for_inverse_edge_type(self, et): try: eta = self.__class__._inverse_cache[self.db][et.id] except KeyError: eta = self.get(inverse=et) self._add_to_cache(self.db,", "edge attributes type = models.ForeignKey(EdgeType) # count count = models.IntegerField(_(u'count'), default=0) site =", "'' } ) @receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge') def pre_save_handler(instance, **kwargs): if not instance.site_id: instance.site", "# edge metadata time = models.DateTimeField(_(u'time'), auto_now_add=True) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edges') auto", "the edge-type cache. 
\"\"\" self.__class__._cache.clear() class EdgeType(models.Model): name = models.CharField(_(u'name'), max_length=100, unique=True) read_as", "types') def __unicode__(self): return u'%s' % self.name def setting_name(self): return self.name.upper() def delete(self,", "related_name=\"from_node_type_set_for_%(class)s\") fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # toNode field toNode_type", "has %(count)d %(type)s edge(s)') % { 'from': self.fromNode if self.fromNode else '', 'count':", "CONNECT LISTENERS TO ENFORCE GRAPH CONSISTENCY models.signals.post_save.connect( SymmetricEdgeManager.create_symmetric_edge, sender=Edge, dispatch_uid='create_symmetric_edge' ) models.signals.post_delete.connect( SymmetricEdgeManager.delete_symmetric_edge,", "ordering = ['name'] verbose_name = _(u'Edge type') verbose_name_plural = _(u'Edge types') def __unicode__(self):", "models.ForeignKey(ContentType, verbose_name=_(u'from node type')) fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") #", "# count count = models.IntegerField(_(u'count'), default=0) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edge_counters') objects =", "except KeyError: pass if et is None: et = super(EdgeTypeManager, self).get(*args, **kwargs) self._add_to_cache(self.db,", "dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect( EdgeCounter.decrease_count, sender=Edge, dispatch_uid='decrease_edge_count' ) models.signals.pre_delete.connect( EdgeCleaner.clean_edges, dispatch_uid='clean_edges' ) # Clear", "\"\"\" Clear out the edge-type-association cache. 
\"\"\" self.__class__._cache.clear() class EdgeTypeAssociation(models.Model): direct = models.ForeignKey(EdgeType,", "if 'id' in kwargs: try: eta = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk'", "from django.db import models from django.dispatch import receiver from django.utils.translation import ugettext_lazy as", "= {} _direct_cache = {} _inverse_cache = {} def get(self, *args, **kwargs): eta", "getattr(instance.fromNode, 'site', Site.objects.get_current()) # CONNECT LISTENERS TO ENFORCE GRAPH CONSISTENCY models.signals.post_save.connect( SymmetricEdgeManager.create_symmetric_edge, sender=Edge,", "setting_name(self): return self.name.upper() def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeType, self).delete(using) class EdgeTypeAssociationManager(models.Manager): #", "EdgeCount(models.Model): # fromNode field fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type')) fromNode_pk = models.TextField(_(u'fromNode", "'id' in kwargs: try: eta = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in", "import ContentType from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from django.db import", "*args, **kwargs): eta = None if 'id' in kwargs: try: eta = self.__class__._cache[self.db][kwargs['id']]", "from django.utils.translation import ugettext_lazy as _ from .fields import JSONField from .consistency_enforcers import", "self.__class__._inverse_cache[self.db][et.id] except KeyError: eta = self.get(inverse=et) self._add_to_cache(self.db, eta) return eta def _add_to_cache(self, using,", "= super(EdgeTypeManager, self).get(*args, **kwargs) self._add_to_cache(self.db, et) return et def _add_to_cache(self, using, et): self.__class__._cache.setdefault(using,", "related_name=\"to_node_type_set_for_%(class)s\") toNode_pk = models.TextField(_(u'toNode ID')) toNode = 
generic.GenericForeignKey(ct_field=\"toNode_type\", fk_field=\"toNode_pk\") # edge attributes type", "= EdgeTypeManager() class Meta(object): ordering = ['name'] verbose_name = _(u'Edge type') verbose_name_plural =", "dispatch_uid='delete_symmetric_edge_type_association' ) models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect( EdgeCounter.decrease_count, sender=Edge, dispatch_uid='decrease_edge_count' ) models.signals.pre_delete.connect(", "self.type } ) class Meta(object): unique_together = ['fromNode_type', 'fromNode_pk', 'type', 'site'] @receiver(models.signals.pre_save, sender=EdgeCount,", "sender=EdgeTypeAssociation, dispatch_uid='create_symmetric_edge_type_association' ) models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='delete_symmetric_edge_type_association' ) models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count' )", "self._add_to_cache(self.db, eta) return eta def _add_to_cache(self, using, eta): self.__class__._cache.setdefault(using, {})[eta.id] = eta self.__class__._direct_cache.setdefault(using,", "= ['-time'] def __unicode__(self): return ( _(u'%(from)s %(verb)s %(to)s') % { 'from': self.fromNode", "EdgeTypeAssociationManager(models.Manager): # Cache to avoid re-looking up EdgeTypeAssociation objects all over the place.", "'site', Site.objects.get_current())) class EdgeCount(models.Model): # fromNode field fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type'))", "del self.__class__._cache.setdefault(using, {})[eta.id] del self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] del self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] except KeyError: pass def", "eta self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] = eta 
self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] = eta def rem_from_cache(self, using, eta):", "sender=Edge, dispatch_uid='decrease_edge_count' ) models.signals.pre_delete.connect( EdgeCleaner.clean_edges, dispatch_uid='clean_edges' ) # Clear the EdgeType cache EdgeType.objects.clear_cache()", "return eta def get_for_inverse_edge_type(self, et): try: eta = self.__class__._inverse_cache[self.db][et.id] except KeyError: eta =", "= generic.GenericForeignKey(ct_field=\"toNode_type\", fk_field=\"toNode_pk\") # edge attributes type = models.ForeignKey(EdgeType) attributes = JSONField(_(u'attributes'), default='{}')", "fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # edge attributes type = models.ForeignKey(EdgeType) # count count", "fk_field=\"toNode_pk\") # edge attributes type = models.ForeignKey(EdgeType) attributes = JSONField(_(u'attributes'), default='{}') # edge", "{})[et.name] = et def rem_from_cache(self, using, et): try: del self.__class__._cache.setdefault(using, {})[et.id] del self.__class__._cache.setdefault(using,", "Site from django.contrib.sites.managers import CurrentSiteManager from django.db import models from django.dispatch import receiver", "= getattr(instance.fromNode, 'site', getattr(instance.toNode, 'site', Site.objects.get_current())) class EdgeCount(models.Model): # fromNode field fromNode_type =", "verbose_name=_(u'from node type'), related_name=\"from_node_type_set_for_%(class)s\") fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") #", "**kwargs) self._add_to_cache(self.db, et) return et def _add_to_cache(self, using, et): self.__class__._cache.setdefault(using, {})[et.id] = et", "= JSONField(_(u'attributes'), default='{}') # edge metadata time = models.DateTimeField(_(u'time'), auto_now_add=True) site = models.ForeignKey(Site,", "_(u'Edge types') def __unicode__(self): return u'%s' % 
self.name def setting_name(self): return self.name.upper() def", "kwargs: try: eta = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass if eta is None: eta", "models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # toNode field toNode_type = models.ForeignKey(ContentType, verbose_name=_(u'to", "try: eta = self.__class__._direct_cache[self.db][et.id] except KeyError: eta = self.get(direct=et) self._add_to_cache(self.db, eta) return eta", "KeyError: pass elif 'name' in kwargs: try: et = self.__class__._cache[self.db][kwargs['name']] except KeyError: pass", "cache. \"\"\" self.__class__._cache.clear() class EdgeTypeAssociation(models.Model): direct = models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in') inverse = models.ForeignKey(EdgeType,", "pass elif 'name' in kwargs: try: et = self.__class__._cache[self.db][kwargs['name']] except KeyError: pass if", "= models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # edge attributes type = models.ForeignKey(EdgeType)", "_add_to_cache(self, using, et): self.__class__._cache.setdefault(using, {})[et.id] = et self.__class__._cache.setdefault(using, {})[et.name] = et def rem_from_cache(self,", ") models.signals.post_save.connect( SymmetricEdgeTypeAssociationManager.create_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='create_symmetric_edge_type_association' ) models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='delete_symmetric_edge_type_association' ) models.signals.post_save.connect( EdgeCounter.increase_count,", "% { 'from': self.fromNode if self.fromNode else '', 'verb': self.type.read_as, 'to': self.toNode if", "self.direct, 'inverse': self.inverse } def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) 
super(EdgeTypeAssociation, self).delete(using) class Edge(models.Model):", "EdgeTypeAssociationManager() def __unicode__(self): return u\"%(direct)s <-> %(inverse)s\" % { 'direct': self.direct, 'inverse': self.inverse", "= models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in') inverse = models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in') objects = EdgeTypeAssociationManager() def", "generic from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager", "= self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in kwargs: try: eta = self.__class__._cache[self.db][kwargs['pk']]", "except KeyError: eta = self.get(direct=et) self._add_to_cache(self.db, eta) return eta def get_for_inverse_edge_type(self, et): try:", "KeyError: pass def clear_cache(self): \"\"\" Clear out the edge-type-association cache. \"\"\" self.__class__._cache.clear() class", "instance.site_id: instance.site = getattr(instance.fromNode, 'site', Site.objects.get_current()) # CONNECT LISTENERS TO ENFORCE GRAPH CONSISTENCY", "verbose_name=_(u'from node type')) fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # edge", "= et self.__class__._cache.setdefault(using, {})[et.name] = et def rem_from_cache(self, using, et): try: del self.__class__._cache.setdefault(using,", "models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in') inverse = models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in') objects = EdgeTypeAssociationManager() def __unicode__(self):", "if et is None: et = super(EdgeTypeManager, self).get(*args, **kwargs) self._add_to_cache(self.db, et) return et", "models.signals.post_save.connect( SymmetricEdgeTypeAssociationManager.create_symmetric_association, sender=EdgeTypeAssociation, 
dispatch_uid='create_symmetric_edge_type_association' ) models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='delete_symmetric_edge_type_association' ) models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge,", "Site.objects.get_current())) class EdgeCount(models.Model): # fromNode field fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type')) fromNode_pk", "{})[et.id] = et self.__class__._cache.setdefault(using, {})[et.name] = et def rem_from_cache(self, using, et): try: del", "is None: eta = super(EdgeTypeAssociationManager, self).get(*args, **kwargs) self._add_to_cache(self.db, eta) return eta def get_for_direct_edge_type(self,", "{})[eta.id] del self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] del self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] except KeyError: pass def clear_cache(self): \"\"\"", "# fromNode field fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type')) fromNode_pk = models.TextField(_(u'fromNode ID'))", "try: eta = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in kwargs: try: eta", "get_for_direct_edge_type(self, et): try: eta = self.__class__._direct_cache[self.db][et.id] except KeyError: eta = self.get(direct=et) self._add_to_cache(self.db, eta)", "# coding=utf-8 from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import", "django.db import models from django.dispatch import receiver from django.utils.translation import ugettext_lazy as _", "def __unicode__(self): return u'%s' % self.name def setting_name(self): return self.name.upper() def delete(self, using=None):", "models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edges') auto = models.BooleanField(_(u'auto created'), default=False) objects = models.Manager() 
on_site =", "node type')) fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # edge attributes", "self._add_to_cache(self.db, et) return et def _add_to_cache(self, using, et): self.__class__._cache.setdefault(using, {})[et.id] = et self.__class__._cache.setdefault(using,", "read_as = models.CharField(_(u'read as'), max_length=100) objects = EdgeTypeManager() class Meta(object): ordering = ['name']", "_(u'%(from)s %(verb)s %(to)s') % { 'from': self.fromNode if self.fromNode else '', 'verb': self.type.read_as,", "not instance.site_id: instance.site = getattr(instance.fromNode, 'site', getattr(instance.toNode, 'site', Site.objects.get_current())) class EdgeCount(models.Model): # fromNode", "models.CharField(_(u'read as'), max_length=100) objects = EdgeTypeManager() class Meta(object): ordering = ['name'] verbose_name =", "= models.ForeignKey(EdgeType) # count count = models.IntegerField(_(u'count'), default=0) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edge_counters')", "def _add_to_cache(self, using, eta): self.__class__._cache.setdefault(using, {})[eta.id] = eta self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] = eta self.__class__._inverse_cache.setdefault(using,", "inverse = models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in') objects = EdgeTypeAssociationManager() def __unicode__(self): return u\"%(direct)s <->", "toNode = generic.GenericForeignKey(ct_field=\"toNode_type\", fk_field=\"toNode_pk\") # edge attributes type = models.ForeignKey(EdgeType) attributes = JSONField(_(u'attributes'),", "( _(u'%(from)s has %(count)d %(type)s edge(s)') % { 'from': self.fromNode if self.fromNode else", "return ( _(u'%(from)s %(verb)s %(to)s') % { 'from': self.fromNode if self.fromNode else '',", "eta): try: del self.__class__._cache.setdefault(using, {})[eta.id] del self.__class__._direct_cache.setdefault(using, 
{})[eta.direct.id] del self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] except KeyError:", "self._add_to_cache(self.db, eta) return eta def get_for_direct_edge_type(self, et): try: eta = self.__class__._direct_cache[self.db][et.id] except KeyError:", "import receiver from django.utils.translation import ugettext_lazy as _ from .fields import JSONField from", "edge-type cache. \"\"\" self.__class__._cache.clear() class EdgeType(models.Model): name = models.CharField(_(u'name'), max_length=100, unique=True) read_as =", "eta) return eta def _add_to_cache(self, using, eta): self.__class__._cache.setdefault(using, {})[eta.id] = eta self.__class__._direct_cache.setdefault(using, {})[eta.direct.id]", "self._add_to_cache(self.db, eta) return eta def get_for_inverse_edge_type(self, et): try: eta = self.__class__._inverse_cache[self.db][et.id] except KeyError:", "self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in kwargs: try: et = self.__class__._cache[self.db][kwargs['pk']] except", "django.utils.translation import ugettext_lazy as _ from .fields import JSONField from .consistency_enforcers import *", "kwargs: try: et = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in kwargs: try:", "avoid re-looking up EdgeTypeAssociation objects all over the place. 
_cache = {} _direct_cache", "from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site from", "class Meta(object): unique_together = ['fromNode_type', 'fromNode_pk', 'type', 'site'] @receiver(models.signals.pre_save, sender=EdgeCount, dispatch_uid='pre_save_edge_count') def pre_save_count_handler(instance,", "'count': self.count, 'type': self.type } ) class Meta(object): unique_together = ['fromNode_type', 'fromNode_pk', 'type',", "* class EdgeTypeManager(models.Manager): # Cache to avoid re-looking up EdgeType objects all over", "pre_save_handler(instance, **kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode, 'site', getattr(instance.toNode, 'site', Site.objects.get_current())) class", "class Meta(object): ordering = ['name'] verbose_name = _(u'Edge type') verbose_name_plural = _(u'Edge types')", "default='{}') # edge metadata time = models.DateTimeField(_(u'time'), auto_now_add=True) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edges')", "'toNode_pk', 'type', 'site'] ordering = ['-time'] def __unicode__(self): return ( _(u'%(from)s %(verb)s %(to)s')", "'name' in kwargs: try: et = self.__class__._cache[self.db][kwargs['name']] except KeyError: pass if et is", ") models.signals.post_delete.connect( EdgeCounter.decrease_count, sender=Edge, dispatch_uid='decrease_edge_count' ) models.signals.pre_delete.connect( EdgeCleaner.clean_edges, dispatch_uid='clean_edges' ) # Clear the", "'inverse': self.inverse } def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeTypeAssociation, self).delete(using) class Edge(models.Model): #", "ENFORCE GRAPH CONSISTENCY models.signals.post_save.connect( SymmetricEdgeManager.create_symmetric_edge, sender=Edge, dispatch_uid='create_symmetric_edge' ) models.signals.post_delete.connect( SymmetricEdgeManager.delete_symmetric_edge, sender=Edge, 
dispatch_uid='delete_symmetric_edge' )", "return et def _add_to_cache(self, using, et): self.__class__._cache.setdefault(using, {})[et.id] = et self.__class__._cache.setdefault(using, {})[et.name] =", "KeyError: pass if et is None: et = super(EdgeTypeManager, self).get(*args, **kwargs) self._add_to_cache(self.db, et)", "eta = None if 'id' in kwargs: try: eta = self.__class__._cache[self.db][kwargs['id']] except KeyError:", "= models.ForeignKey(ContentType, verbose_name=_(u'to node type'), related_name=\"to_node_type_set_for_%(class)s\") toNode_pk = models.TextField(_(u'toNode ID')) toNode = generic.GenericForeignKey(ct_field=\"toNode_type\",", "else '' } ) @receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge') def pre_save_handler(instance, **kwargs): if not instance.site_id:", "objects all over the place. _cache = {} def get(self, *args, **kwargs): et", "# Cache to avoid re-looking up EdgeType objects all over the place. _cache", "is None: et = super(EdgeTypeManager, self).get(*args, **kwargs) self._add_to_cache(self.db, et) return et def _add_to_cache(self,", "related_name='is_inverse_in') objects = EdgeTypeAssociationManager() def __unicode__(self): return u\"%(direct)s <-> %(inverse)s\" % { 'direct':", "ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # edge attributes type = models.ForeignKey(EdgeType) # count", "super(EdgeTypeAssociationManager, self).get(*args, **kwargs) self._add_to_cache(self.db, eta) return eta def get_for_direct_edge_type(self, et): try: eta =", "EdgeTypeManager(models.Manager): # Cache to avoid re-looking up EdgeType objects all over the place.", "**kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode, 'site', getattr(instance.toNode, 'site', Site.objects.get_current())) class EdgeCount(models.Model):", "__unicode__(self): return u'%s' % self.name def setting_name(self): return self.name.upper() def delete(self, using=None): 
self.__class__.objects.rem_from_cache(using,", "the edge-type-association cache. \"\"\" self.__class__._cache.clear() class EdgeTypeAssociation(models.Model): direct = models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in') inverse", "models.BooleanField(_(u'auto created'), default=False) objects = models.Manager() on_site = CurrentSiteManager() class Meta(object): unique_together =", "SymmetricEdgeManager.delete_symmetric_edge, sender=Edge, dispatch_uid='delete_symmetric_edge' ) models.signals.post_save.connect( SymmetricEdgeTypeAssociationManager.create_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='create_symmetric_edge_type_association' ) models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='delete_symmetric_edge_type_association'", "EdgeTypeManager() class Meta(object): ordering = ['name'] verbose_name = _(u'Edge type') verbose_name_plural = _(u'Edge", "% self.name def setting_name(self): return self.name.upper() def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeType, self).delete(using)", "class EdgeTypeAssociation(models.Model): direct = models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in') inverse = models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in') objects", "type = models.ForeignKey(EdgeType) # count count = models.IntegerField(_(u'count'), default=0) site = models.ForeignKey(Site, verbose_name=_(u'site'),", "'site', Site.objects.get_current()) # CONNECT LISTENERS TO ENFORCE GRAPH CONSISTENCY models.signals.post_save.connect( SymmetricEdgeManager.create_symmetric_edge, sender=Edge, dispatch_uid='create_symmetric_edge'", "kwargs: try: et = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass elif 'name' in kwargs: try:", "'type', 'site'] ordering = ['-time'] def __unicode__(self): return ( _(u'%(from)s %(verb)s %(to)s') %", ") 
models.signals.post_delete.connect( SymmetricEdgeManager.delete_symmetric_edge, sender=Edge, dispatch_uid='delete_symmetric_edge' ) models.signals.post_save.connect( SymmetricEdgeTypeAssociationManager.create_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='create_symmetric_edge_type_association' ) models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association,", "fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # toNode field toNode_type = models.ForeignKey(ContentType, verbose_name=_(u'to node type'),", "unique=True, related_name='is_inverse_in') objects = EdgeTypeAssociationManager() def __unicode__(self): return u\"%(direct)s <-> %(inverse)s\" % {", "re-looking up EdgeTypeAssociation objects all over the place. _cache = {} _direct_cache =", "self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] except KeyError: pass def clear_cache(self): \"\"\" Clear out the edge-type-association cache.", "'verb': self.type.read_as, 'to': self.toNode if self.toNode else '' } ) @receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge')", "using, et): try: del self.__class__._cache.setdefault(using, {})[et.id] del self.__class__._cache.setdefault(using, {})[et.name] except KeyError: pass def", "models.DateTimeField(_(u'time'), auto_now_add=True) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edges') auto = models.BooleanField(_(u'auto created'), default=False) objects", "attributes = JSONField(_(u'attributes'), default='{}') # edge metadata time = models.DateTimeField(_(u'time'), auto_now_add=True) site =", "field fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type'), related_name=\"from_node_type_set_for_%(class)s\") fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode", "'', 'verb': self.type.read_as, 'to': self.toNode if self.toNode else '' } ) @receiver(models.signals.pre_save, sender=Edge,", 
"= {} def get(self, *args, **kwargs): et = None if 'id' in kwargs:", "KeyError: pass def clear_cache(self): \"\"\" Clear out the edge-type cache. \"\"\" self.__class__._cache.clear() class", "'site'] @receiver(models.signals.pre_save, sender=EdgeCount, dispatch_uid='pre_save_edge_count') def pre_save_count_handler(instance, **kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode,", "def rem_from_cache(self, using, et): try: del self.__class__._cache.setdefault(using, {})[et.id] del self.__class__._cache.setdefault(using, {})[et.name] except KeyError:", "eta = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass if eta is None: eta = super(EdgeTypeAssociationManager,", "count = models.IntegerField(_(u'count'), default=0) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edge_counters') objects = models.Manager() on_site", "try: del self.__class__._cache.setdefault(using, {})[et.id] del self.__class__._cache.setdefault(using, {})[et.name] except KeyError: pass def clear_cache(self): \"\"\"", "} def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeTypeAssociation, self).delete(using) class Edge(models.Model): # fromNode field", "u'%s' % self.name def setting_name(self): return self.name.upper() def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeType,", "self).get(*args, **kwargs) self._add_to_cache(self.db, et) return et def _add_to_cache(self, using, et): self.__class__._cache.setdefault(using, {})[et.id] =", "= self.get(inverse=et) self._add_to_cache(self.db, eta) return eta def _add_to_cache(self, using, eta): self.__class__._cache.setdefault(using, {})[eta.id] =", "__unicode__(self): return ( _(u'%(from)s has %(count)d %(type)s edge(s)') % { 'from': self.fromNode if", "None if 'id' in kwargs: try: et = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif", "et): try: del self.__class__._cache.setdefault(using, 
class EdgeTypeManager(models.Manager):
    # Cache to avoid re-looking up EdgeType objects all over the place.
    # Maps db alias -> {id_or_name: EdgeType}. Class-level and shared on
    # purpose, so every manager instance sees the same cache.
    _cache = {}

    def get(self, *args, **kwargs):
        """
        Return the EdgeType matching ``id``/``pk``/``name``.

        The per-database cache is consulted first (keyed by both id and
        name, see ``_add_to_cache``); on a miss the row is fetched and
        cached.
        """
        et = None
        if 'id' in kwargs:
            try:
                et = self.__class__._cache[self.db][kwargs['id']]
            except KeyError:
                pass
        elif 'pk' in kwargs:
            try:
                et = self.__class__._cache[self.db][kwargs['pk']]
            except KeyError:
                pass
        elif 'name' in kwargs:
            try:
                et = self.__class__._cache[self.db][kwargs['name']]
            except KeyError:
                pass
        if et is None:
            et = super(EdgeTypeManager, self).get(*args, **kwargs)
            self._add_to_cache(self.db, et)
        return et

    def _add_to_cache(self, using, et):
        # Cache under both id and name so either lookup form hits.
        self.__class__._cache.setdefault(using, {})[et.id] = et
        self.__class__._cache.setdefault(using, {})[et.name] = et

    def rem_from_cache(self, using, et):
        """
        Evict *et* from the cache for database ``using``.

        Each key is removed independently: the previous single
        try/except around two ``del`` statements stopped at the first
        KeyError, which could leave the ``name`` entry stale whenever the
        ``id`` entry was already gone.
        """
        bucket = self.__class__._cache.setdefault(using, {})
        bucket.pop(et.id, None)
        bucket.pop(et.name, None)

    def clear_cache(self):
        """
        Clear out the edge-type cache.
        """
        self.__class__._cache.clear()
class EdgeType(models.Model):
    """A named kind of edge; ``read_as`` is its human-readable verb phrase."""

    # Unique identifier for this kind of edge.
    name = models.CharField(_(u'name'), max_length=100, unique=True)
    # Verb phrase used when rendering an edge in __unicode__ of Edge.
    read_as = models.CharField(_(u'read as'), max_length=100)

    objects = EdgeTypeManager()

    class Meta(object):
        ordering = ['name']
        verbose_name = _(u'Edge type')
        verbose_name_plural = _(u'Edge types')

    def __unicode__(self):
        return u'%s' % self.name

    def setting_name(self):
        # Upper-cased name; presumably used to build a settings-style
        # key elsewhere — confirm with callers.
        return self.name.upper()

    def delete(self, using=None):
        # Evict from the manager cache first so a stale EdgeType is not
        # served after the row is gone.
        self.__class__.objects.rem_from_cache(using, self)
        super(EdgeType, self).delete(using)
class EdgeTypeAssociationManager(models.Manager):
    # Cache to avoid re-looking up EdgeTypeAssociation objects all over
    # the place. All three maps are keyed first by db alias:
    #   _cache:         id/pk -> EdgeTypeAssociation
    #   _direct_cache:  direct EdgeType id -> EdgeTypeAssociation
    #   _inverse_cache: inverse EdgeType id -> EdgeTypeAssociation
    _cache = {}
    _direct_cache = {}
    _inverse_cache = {}

    def get(self, *args, **kwargs):
        """
        Return the EdgeTypeAssociation matching ``id``/``pk``, consulting
        the per-database cache before hitting the database.
        """
        eta = None
        if 'id' in kwargs:
            try:
                eta = self.__class__._cache[self.db][kwargs['id']]
            except KeyError:
                pass
        elif 'pk' in kwargs:
            try:
                eta = self.__class__._cache[self.db][kwargs['pk']]
            except KeyError:
                pass
        if eta is None:
            eta = super(EdgeTypeAssociationManager, self).get(*args, **kwargs)
            self._add_to_cache(self.db, eta)
        return eta

    def get_for_direct_edge_type(self, et):
        """Return the association whose ``direct`` edge type is *et*."""
        try:
            eta = self.__class__._direct_cache[self.db][et.id]
        except KeyError:
            eta = self.get(direct=et)
            self._add_to_cache(self.db, eta)
        return eta

    def get_for_inverse_edge_type(self, et):
        """Return the association whose ``inverse`` edge type is *et*."""
        try:
            eta = self.__class__._inverse_cache[self.db][et.id]
        except KeyError:
            eta = self.get(inverse=et)
            self._add_to_cache(self.db, eta)
        return eta

    def _add_to_cache(self, using, eta):
        self.__class__._cache.setdefault(using, {})[eta.id] = eta
        self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] = eta
        self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] = eta

    def rem_from_cache(self, using, eta):
        """
        Evict *eta* from all three caches for database ``using``.

        Entries are popped independently: the previous single try/except
        around three ``del`` statements aborted at the first missing key,
        leaving the remaining caches stale.
        """
        self.__class__._cache.setdefault(using, {}).pop(eta.id, None)
        self.__class__._direct_cache.setdefault(using, {}).pop(eta.direct.id, None)
        self.__class__._inverse_cache.setdefault(using, {}).pop(eta.inverse.id, None)

    def clear_cache(self):
        """
        Clear out the edge-type-association cache.

        Fix: all three class-level caches are cleared. Previously only
        ``_cache`` was emptied, so stale associations remained reachable
        via ``get_for_direct_edge_type``/``get_for_inverse_edge_type``.
        """
        self.__class__._cache.clear()
        self.__class__._direct_cache.clear()
        self.__class__._inverse_cache.clear()
\"\"\" self.__class__._cache.clear() class EdgeTypeAssociation(models.Model): direct", "in kwargs: try: et = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in kwargs:", "self.fromNode if self.fromNode else '', 'count': self.count, 'type': self.type } ) class Meta(object):", "eta = self.__class__._direct_cache[self.db][et.id] except KeyError: eta = self.get(direct=et) self._add_to_cache(self.db, eta) return eta def", "_direct_cache = {} _inverse_cache = {} def get(self, *args, **kwargs): eta = None", "# edge attributes type = models.ForeignKey(EdgeType) # count count = models.IntegerField(_(u'count'), default=0) site", "eta def get_for_direct_edge_type(self, et): try: eta = self.__class__._direct_cache[self.db][et.id] except KeyError: eta = self.get(direct=et)", "EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect( EdgeCounter.decrease_count, sender=Edge, dispatch_uid='decrease_edge_count' ) models.signals.pre_delete.connect( EdgeCleaner.clean_edges, dispatch_uid='clean_edges' )", ") class Meta(object): unique_together = ['fromNode_type', 'fromNode_pk', 'type', 'site'] @receiver(models.signals.pre_save, sender=EdgeCount, dispatch_uid='pre_save_edge_count') def", "except KeyError: pass elif 'name' in kwargs: try: et = self.__class__._cache[self.db][kwargs['name']] except KeyError:", "place. 
_cache = {} def get(self, *args, **kwargs): et = None if 'id'", "if self.toNode else '' } ) @receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge') def pre_save_handler(instance, **kwargs): if", "type = models.ForeignKey(EdgeType) attributes = JSONField(_(u'attributes'), default='{}') # edge metadata time = models.DateTimeField(_(u'time'),", "'from': self.fromNode if self.fromNode else '', 'verb': self.type.read_as, 'to': self.toNode if self.toNode else", "# fromNode field fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type'), related_name=\"from_node_type_set_for_%(class)s\") fromNode_pk = models.TextField(_(u'fromNode", "SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='delete_symmetric_edge_type_association' ) models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect( EdgeCounter.decrease_count, sender=Edge, dispatch_uid='decrease_edge_count'", "place. 
class Edge(models.Model):
    """A typed, directed connection between two arbitrary model instances."""

    # fromNode field — generic FK to the edge's origin object.
    fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type'), related_name="from_node_type_set_for_%(class)s")
    fromNode_pk = models.TextField(_(u'fromNode ID'))
    fromNode = generic.GenericForeignKey(ct_field="fromNode_type", fk_field="fromNode_pk")

    # toNode field — generic FK to the edge's destination object.
    toNode_type = models.ForeignKey(ContentType, verbose_name=_(u'to node type'), related_name="to_node_type_set_for_%(class)s")
    toNode_pk = models.TextField(_(u'toNode ID'))
    toNode = generic.GenericForeignKey(ct_field="toNode_type", fk_field="toNode_pk")

    # edge attributes
    type = models.ForeignKey(EdgeType)
    # Arbitrary per-edge data as a JSON object; defaults to empty.
    attributes = JSONField(_(u'attributes'), default='{}')

    # edge metadata
    time = models.DateTimeField(_(u'time'), auto_now_add=True)
    site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edges')
    # True for edges created automatically (e.g. symmetric counterparts)
    # rather than directly — presumably set by the consistency enforcers;
    # confirm against consistency_enforcers.
    auto = models.BooleanField(_(u'auto created'), default=False)

    objects = models.Manager()
    on_site = CurrentSiteManager()

    class Meta(object):
        # One edge per (from, to, type, site) tuple.
        unique_together = ['fromNode_type', 'fromNode_pk', 'toNode_type', 'toNode_pk', 'type', 'site']
        ordering = ['-time']

    def __unicode__(self):
        # e.g. "alice follows bob" — nodes render as '' when dangling.
        return (
            _(u'%(from)s %(verb)s %(to)s') % {
                'from': self.fromNode if self.fromNode else '',
                'verb': self.type.read_as,
                'to': self.toNode if self.toNode else ''
            }
        )
models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation,", "self).delete(using) class Edge(models.Model): # fromNode field fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type'), related_name=\"from_node_type_set_for_%(class)s\")", "fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # edge attributes type =", "et self.__class__._cache.setdefault(using, {})[et.name] = et def rem_from_cache(self, using, et): try: del self.__class__._cache.setdefault(using, {})[et.id]", "'type', 'site'] @receiver(models.signals.pre_save, sender=EdgeCount, dispatch_uid='pre_save_edge_count') def pre_save_count_handler(instance, **kwargs): if not instance.site_id: instance.site =", "# edge attributes type = models.ForeignKey(EdgeType) attributes = JSONField(_(u'attributes'), default='{}') # edge metadata", "models.TextField(_(u'toNode ID')) toNode = generic.GenericForeignKey(ct_field=\"toNode_type\", fk_field=\"toNode_pk\") # edge attributes type = models.ForeignKey(EdgeType) attributes", "edge(s)') % { 'from': self.fromNode if self.fromNode else '', 'count': self.count, 'type': self.type", "Meta(object): unique_together = ['fromNode_type', 'fromNode_pk', 'toNode_type', 'toNode_pk', 'type', 'site'] ordering = ['-time'] def", "et): try: eta = self.__class__._direct_cache[self.db][et.id] except KeyError: eta = self.get(direct=et) self._add_to_cache(self.db, eta) return", "auto_now_add=True) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edges') auto = models.BooleanField(_(u'auto created'), default=False) objects =", "auto = models.BooleanField(_(u'auto created'), default=False) objects = models.Manager() on_site = CurrentSiteManager() class Meta(object):", "Meta(object): unique_together = ['fromNode_type', 'fromNode_pk', 'type', 'site'] @receiver(models.signals.pre_save, 
class EdgeCount(models.Model):
    """Denormalized counter of edges of a given type leaving a node."""

    # fromNode field — generic FK to the counted node.
    fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type'))
    fromNode_pk = models.TextField(_(u'fromNode ID'))
    fromNode = generic.GenericForeignKey(ct_field="fromNode_type", fk_field="fromNode_pk")

    # edge attributes
    type = models.ForeignKey(EdgeType)

    # count — maintained externally; presumably kept in sync by the
    # EdgeCounter signal handlers connected below — confirm.
    count = models.IntegerField(_(u'count'), default=0)

    site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edge_counters')

    objects = models.Manager()
    on_site = CurrentSiteManager()

    def __unicode__(self):
        return (
            _(u'%(from)s has %(count)d %(type)s edge(s)') % {
                'from': self.fromNode if self.fromNode else '',
                'count': self.count,
                'type': self.type
            }
        )

    class Meta(object):
        # One counter per (node, edge type, site).
        unique_together = ['fromNode_type', 'fromNode_pk', 'type', 'site']
fromNode_type", "{})[eta.inverse.id] except KeyError: pass def clear_cache(self): \"\"\" Clear out the edge-type-association cache. \"\"\"", "= getattr(instance.fromNode, 'site', Site.objects.get_current()) # CONNECT LISTENERS TO ENFORCE GRAPH CONSISTENCY models.signals.post_save.connect( SymmetricEdgeManager.create_symmetric_edge,", "KeyError: pass elif 'pk' in kwargs: try: eta = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass", "self.__class__._cache.setdefault(using, {})[et.name] = et def rem_from_cache(self, using, et): try: del self.__class__._cache.setdefault(using, {})[et.id] del", "verbose_name=_(u'site'), related_name='edge_counters') objects = models.Manager() on_site = CurrentSiteManager() def __unicode__(self): return ( _(u'%(from)s", "class EdgeCount(models.Model): # fromNode field fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type')) fromNode_pk =", "objects all over the place. _cache = {} _direct_cache = {} _inverse_cache =", "class EdgeType(models.Model): name = models.CharField(_(u'name'), max_length=100, unique=True) read_as = models.CharField(_(u'read as'), max_length=100) objects", "up EdgeType objects all over the place. _cache = {} def get(self, *args,", "toNode_pk = models.TextField(_(u'toNode ID')) toNode = generic.GenericForeignKey(ct_field=\"toNode_type\", fk_field=\"toNode_pk\") # edge attributes type =", "Cache to avoid re-looking up EdgeType objects all over the place. 
# CONNECT LISTENERS TO ENFORCE GRAPH CONSISTENCY
# dispatch_uid keeps each handler from being connected more than once
# if this module is imported repeatedly.

# Keep symmetric (inverse) edges in step with their counterparts.
models.signals.post_save.connect(
    SymmetricEdgeManager.create_symmetric_edge,
    sender=Edge,
    dispatch_uid='create_symmetric_edge'
)
models.signals.post_delete.connect(
    SymmetricEdgeManager.delete_symmetric_edge,
    sender=Edge,
    dispatch_uid='delete_symmetric_edge'
)

# Mirror edge-type associations symmetrically.
models.signals.post_save.connect(
    SymmetricEdgeTypeAssociationManager.create_symmetric_association,
    sender=EdgeTypeAssociation,
    dispatch_uid='create_symmetric_edge_type_association'
)
models.signals.post_delete.connect(
    SymmetricEdgeTypeAssociationManager.delete_symmetric_association,
    sender=EdgeTypeAssociation,
    dispatch_uid='delete_symmetric_edge_type_association'
)

# Maintain the denormalized EdgeCount rows.
models.signals.post_save.connect(
    EdgeCounter.increase_count,
    sender=Edge,
    dispatch_uid='increase_edge_count'
)
models.signals.post_delete.connect(
    EdgeCounter.decrease_count,
    sender=Edge,
    dispatch_uid='decrease_edge_count'
)

# No sender: clean dangling edges whenever any model instance is deleted.
models.signals.pre_delete.connect(
    EdgeCleaner.clean_edges,
    dispatch_uid='clean_edges'
)
models.ForeignKey(EdgeType) attributes = JSONField(_(u'attributes'), default='{}') #", "in kwargs: try: et = self.__class__._cache[self.db][kwargs['name']] except KeyError: pass if et is None:", "if self.fromNode else '', 'verb': self.type.read_as, 'to': self.toNode if self.toNode else '' }", "_ from .fields import JSONField from .consistency_enforcers import * class EdgeTypeManager(models.Manager): # Cache", "pass def clear_cache(self): \"\"\" Clear out the edge-type cache. \"\"\" self.__class__._cache.clear() class EdgeType(models.Model):", "= None if 'id' in kwargs: try: et = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass", "= eta def rem_from_cache(self, using, eta): try: del self.__class__._cache.setdefault(using, {})[eta.id] del self.__class__._direct_cache.setdefault(using, {})[eta.direct.id]", "models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in') objects = EdgeTypeAssociationManager() def __unicode__(self): return u\"%(direct)s <-> %(inverse)s\" %", "= self.get(direct=et) self._add_to_cache(self.db, eta) return eta def get_for_inverse_edge_type(self, et): try: eta = self.__class__._inverse_cache[self.db][et.id]", "models.ForeignKey(ContentType, verbose_name=_(u'from node type'), related_name=\"from_node_type_set_for_%(class)s\") fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\")", "import * class EdgeTypeManager(models.Manager): # Cache to avoid re-looking up EdgeType objects all", "related_name='edges') auto = models.BooleanField(_(u'auto created'), default=False) objects = models.Manager() on_site = CurrentSiteManager() class", "= models.CharField(_(u'read as'), max_length=100) objects = EdgeTypeManager() class Meta(object): ordering = ['name'] verbose_name", "dispatch_uid='pre_save_edge') def pre_save_handler(instance, **kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode, 'site', getattr(instance.toNode, 
'site',", "def __unicode__(self): return u\"%(direct)s <-> %(inverse)s\" % { 'direct': self.direct, 'inverse': self.inverse }", "def __unicode__(self): return ( _(u'%(from)s %(verb)s %(to)s') % { 'from': self.fromNode if self.fromNode", "CONSISTENCY models.signals.post_save.connect( SymmetricEdgeManager.create_symmetric_edge, sender=Edge, dispatch_uid='create_symmetric_edge' ) models.signals.post_delete.connect( SymmetricEdgeManager.delete_symmetric_edge, sender=Edge, dispatch_uid='delete_symmetric_edge' ) models.signals.post_save.connect( SymmetricEdgeTypeAssociationManager.create_symmetric_association,", "self) super(EdgeType, self).delete(using) class EdgeTypeAssociationManager(models.Manager): # Cache to avoid re-looking up EdgeTypeAssociation objects", "JSONField from .consistency_enforcers import * class EdgeTypeManager(models.Manager): # Cache to avoid re-looking up", "instance.site_id: instance.site = getattr(instance.fromNode, 'site', getattr(instance.toNode, 'site', Site.objects.get_current())) class EdgeCount(models.Model): # fromNode field", "to avoid re-looking up EdgeTypeAssociation objects all over the place. 
_cache = {}", ") models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect( EdgeCounter.decrease_count, sender=Edge, dispatch_uid='decrease_edge_count' ) models.signals.pre_delete.connect( EdgeCleaner.clean_edges,", "eta self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] = eta def rem_from_cache(self, using, eta): try: del self.__class__._cache.setdefault(using, {})[eta.id]", "try: eta = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass if eta is None: eta =", "unique_together = ['fromNode_type', 'fromNode_pk', 'toNode_type', 'toNode_pk', 'type', 'site'] ordering = ['-time'] def __unicode__(self):", "try: et = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass elif 'name' in kwargs: try: et", "CurrentSiteManager() def __unicode__(self): return ( _(u'%(from)s has %(count)d %(type)s edge(s)') % { 'from':", "Site.objects.get_current()) # CONNECT LISTENERS TO ENFORCE GRAPH CONSISTENCY models.signals.post_save.connect( SymmetricEdgeManager.create_symmetric_edge, sender=Edge, dispatch_uid='create_symmetric_edge' )", "site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edge_counters') objects = models.Manager() on_site = CurrentSiteManager() def __unicode__(self):", "self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] = eta def rem_from_cache(self, using, eta): try: del self.__class__._cache.setdefault(using, {})[eta.id] del", "et def _add_to_cache(self, using, et): self.__class__._cache.setdefault(using, {})[et.id] = et self.__class__._cache.setdefault(using, {})[et.name] = et", "fromNode_type = models.ForeignKey(ContentType, verbose_name=_(u'from node type')) fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\",", "max_length=100, unique=True) read_as = models.CharField(_(u'read as'), max_length=100) objects = EdgeTypeManager() class 
Meta(object): ordering", "elif 'name' in kwargs: try: et = self.__class__._cache[self.db][kwargs['name']] except KeyError: pass if et", "u\"%(direct)s <-> %(inverse)s\" % { 'direct': self.direct, 'inverse': self.inverse } def delete(self, using=None):", "<-> %(inverse)s\" % { 'direct': self.direct, 'inverse': self.inverse } def delete(self, using=None): self.__class__.objects.rem_from_cache(using,", "dispatch_uid='create_symmetric_edge_type_association' ) models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='delete_symmetric_edge_type_association' ) models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect(", "None if 'id' in kwargs: try: eta = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif", "self.__class__._cache.setdefault(using, {})[eta.id] = eta self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] = eta self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] = eta def", "\"\"\" self.__class__._cache.clear() class EdgeTypeAssociation(models.Model): direct = models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in') inverse = models.ForeignKey(EdgeType, unique=True,", "SymmetricEdgeManager.create_symmetric_edge, sender=Edge, dispatch_uid='create_symmetric_edge' ) models.signals.post_delete.connect( SymmetricEdgeManager.delete_symmetric_edge, sender=Edge, dispatch_uid='delete_symmetric_edge' ) models.signals.post_save.connect( SymmetricEdgeTypeAssociationManager.create_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='create_symmetric_edge_type_association'", "eta = self.get(inverse=et) self._add_to_cache(self.db, eta) return eta def _add_to_cache(self, using, eta): self.__class__._cache.setdefault(using, {})[eta.id]", "unique=True, related_name='is_direct_in') inverse = models.ForeignKey(EdgeType, unique=True, 
related_name='is_inverse_in') objects = EdgeTypeAssociationManager() def __unicode__(self): return", "django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from django.db", "import models from django.dispatch import receiver from django.utils.translation import ugettext_lazy as _ from", "self.__class__._cache.setdefault(using, {})[et.id] del self.__class__._cache.setdefault(using, {})[et.name] except KeyError: pass def clear_cache(self): \"\"\" Clear out", "_inverse_cache = {} def get(self, *args, **kwargs): eta = None if 'id' in", "eta def get_for_inverse_edge_type(self, et): try: eta = self.__class__._inverse_cache[self.db][et.id] except KeyError: eta = self.get(inverse=et)", "models.Manager() on_site = CurrentSiteManager() def __unicode__(self): return ( _(u'%(from)s has %(count)d %(type)s edge(s)')", "= models.ForeignKey(ContentType, verbose_name=_(u'from node type')) fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\")", "the place. 
_cache = {} _direct_cache = {} _inverse_cache = {} def get(self,", "receiver from django.utils.translation import ugettext_lazy as _ from .fields import JSONField from .consistency_enforcers", "fromNode_pk = models.TextField(_(u'fromNode ID')) fromNode = generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # toNode field toNode_type =", "et = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass elif 'name' in kwargs: try: et =", "coding=utf-8 from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site", "ugettext_lazy as _ from .fields import JSONField from .consistency_enforcers import * class EdgeTypeManager(models.Manager):", "{})[eta.direct.id] del self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] except KeyError: pass def clear_cache(self): \"\"\" Clear out the", "= generic.GenericForeignKey(ct_field=\"fromNode_type\", fk_field=\"fromNode_pk\") # toNode field toNode_type = models.ForeignKey(ContentType, verbose_name=_(u'to node type'), related_name=\"to_node_type_set_for_%(class)s\")", "sender=Edge, dispatch_uid='create_symmetric_edge' ) models.signals.post_delete.connect( SymmetricEdgeManager.delete_symmetric_edge, sender=Edge, dispatch_uid='delete_symmetric_edge' ) models.signals.post_save.connect( SymmetricEdgeTypeAssociationManager.create_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='create_symmetric_edge_type_association' )", "et def rem_from_cache(self, using, et): try: del self.__class__._cache.setdefault(using, {})[et.id] del self.__class__._cache.setdefault(using, {})[et.name] except", "= models.ForeignKey(EdgeType) attributes = JSONField(_(u'attributes'), default='{}') # edge metadata time = models.DateTimeField(_(u'time'), auto_now_add=True)", "metadata time = models.DateTimeField(_(u'time'), auto_now_add=True) site = models.ForeignKey(Site, verbose_name=_(u'site'), 
related_name='edges') auto = models.BooleanField(_(u'auto", "et): self.__class__._cache.setdefault(using, {})[et.id] = et self.__class__._cache.setdefault(using, {})[et.name] = et def rem_from_cache(self, using, et):", "eta = self.get(direct=et) self._add_to_cache(self.db, eta) return eta def get_for_inverse_edge_type(self, et): try: eta =", "} ) @receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge') def pre_save_handler(instance, **kwargs): if not instance.site_id: instance.site =", "def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeType, self).delete(using) class EdgeTypeAssociationManager(models.Manager): # Cache to avoid", "def get_for_direct_edge_type(self, et): try: eta = self.__class__._direct_cache[self.db][et.id] except KeyError: eta = self.get(direct=et) self._add_to_cache(self.db,", "django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from django.db import models from django.dispatch", "self.get(direct=et) self._add_to_cache(self.db, eta) return eta def get_for_inverse_edge_type(self, et): try: eta = self.__class__._inverse_cache[self.db][et.id] except", "EdgeCounter.decrease_count, sender=Edge, dispatch_uid='decrease_edge_count' ) models.signals.pre_delete.connect( EdgeCleaner.clean_edges, dispatch_uid='clean_edges' ) # Clear the EdgeType cache", "JSONField(_(u'attributes'), default='{}') # edge metadata time = models.DateTimeField(_(u'time'), auto_now_add=True) site = models.ForeignKey(Site, verbose_name=_(u'site'),", "rem_from_cache(self, using, et): try: del self.__class__._cache.setdefault(using, {})[et.id] del self.__class__._cache.setdefault(using, {})[et.name] except KeyError: pass", "'site'] ordering = ['-time'] def __unicode__(self): return ( _(u'%(from)s %(verb)s %(to)s') % {", "__unicode__(self): return u\"%(direct)s <-> %(inverse)s\" % { 'direct': self.direct, 'inverse': self.inverse } def", "cache. 
\"\"\" self.__class__._cache.clear() class EdgeType(models.Model): name = models.CharField(_(u'name'), max_length=100, unique=True) read_as = models.CharField(_(u'read", "def clear_cache(self): \"\"\" Clear out the edge-type cache. \"\"\" self.__class__._cache.clear() class EdgeType(models.Model): name", "time = models.DateTimeField(_(u'time'), auto_now_add=True) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edges') auto = models.BooleanField(_(u'auto created'),", "'pk' in kwargs: try: et = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass elif 'name' in", "et = super(EdgeTypeManager, self).get(*args, **kwargs) self._add_to_cache(self.db, et) return et def _add_to_cache(self, using, et):", "if 'id' in kwargs: try: et = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk'", "from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from django.db import models from", "= models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in') objects = EdgeTypeAssociationManager() def __unicode__(self): return u\"%(direct)s <-> %(inverse)s\"", "models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='delete_symmetric_edge_type_association' ) models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect( EdgeCounter.decrease_count, sender=Edge,", "using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeTypeAssociation, self).delete(using) class Edge(models.Model): # fromNode field fromNode_type = models.ForeignKey(ContentType,", ".consistency_enforcers import * class EdgeTypeManager(models.Manager): # Cache to avoid re-looking up EdgeType objects", "out the edge-type-association cache. 
\"\"\" self.__class__._cache.clear() class EdgeTypeAssociation(models.Model): direct = models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in')", "toNode field toNode_type = models.ForeignKey(ContentType, verbose_name=_(u'to node type'), related_name=\"to_node_type_set_for_%(class)s\") toNode_pk = models.TextField(_(u'toNode ID'))", "self.__class__._cache.clear() class EdgeTypeAssociation(models.Model): direct = models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in') inverse = models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in')", "del self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] except KeyError: pass def clear_cache(self): \"\"\" Clear out the edge-type-association", "edge attributes type = models.ForeignKey(EdgeType) attributes = JSONField(_(u'attributes'), default='{}') # edge metadata time", "%(type)s edge(s)') % { 'from': self.fromNode if self.fromNode else '', 'count': self.count, 'type':", "self).delete(using) class EdgeTypeAssociationManager(models.Manager): # Cache to avoid re-looking up EdgeTypeAssociation objects all over", "from django.contrib.sites.managers import CurrentSiteManager from django.db import models from django.dispatch import receiver from", "get_for_inverse_edge_type(self, et): try: eta = self.__class__._inverse_cache[self.db][et.id] except KeyError: eta = self.get(inverse=et) self._add_to_cache(self.db, eta)", "sender=Edge, dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect( EdgeCounter.decrease_count, sender=Edge, dispatch_uid='decrease_edge_count' ) models.signals.pre_delete.connect( EdgeCleaner.clean_edges, dispatch_uid='clean_edges' ) #", "except KeyError: eta = self.get(inverse=et) self._add_to_cache(self.db, eta) return eta def _add_to_cache(self, using, eta):", "pre_save_count_handler(instance, **kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode, 'site', Site.objects.get_current()) # CONNECT LISTENERS", "over the 
place. _cache = {} def get(self, *args, **kwargs): et = None", "class Meta(object): unique_together = ['fromNode_type', 'fromNode_pk', 'toNode_type', 'toNode_pk', 'type', 'site'] ordering = ['-time']", "KeyError: pass elif 'pk' in kwargs: try: et = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass", "**kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode, 'site', Site.objects.get_current()) # CONNECT LISTENERS TO", "EdgeType objects all over the place. _cache = {} def get(self, *args, **kwargs):", "self.__class__._cache.setdefault(using, {})[et.name] except KeyError: pass def clear_cache(self): \"\"\" Clear out the edge-type cache.", "self.toNode else '' } ) @receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge') def pre_save_handler(instance, **kwargs): if not", "pass elif 'pk' in kwargs: try: eta = self.__class__._cache[self.db][kwargs['pk']] except KeyError: pass if", "super(EdgeTypeManager, self).get(*args, **kwargs) self._add_to_cache(self.db, et) return et def _add_to_cache(self, using, et): self.__class__._cache.setdefault(using, {})[et.id]", "clear_cache(self): \"\"\" Clear out the edge-type-association cache. 
\"\"\" self.__class__._cache.clear() class EdgeTypeAssociation(models.Model): direct =", "unique=True) read_as = models.CharField(_(u'read as'), max_length=100) objects = EdgeTypeManager() class Meta(object): ordering =", "verbose_name = _(u'Edge type') verbose_name_plural = _(u'Edge types') def __unicode__(self): return u'%s' %", "= eta self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] = eta self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] = eta def rem_from_cache(self, using,", "def pre_save_handler(instance, **kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode, 'site', getattr(instance.toNode, 'site', Site.objects.get_current()))", "eta def _add_to_cache(self, using, eta): self.__class__._cache.setdefault(using, {})[eta.id] = eta self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] = eta", "class EdgeTypeManager(models.Manager): # Cache to avoid re-looking up EdgeType objects all over the", "self.__class__._cache.clear() class EdgeType(models.Model): name = models.CharField(_(u'name'), max_length=100, unique=True) read_as = models.CharField(_(u'read as'), max_length=100)", "models.ForeignKey(EdgeType) # count count = models.IntegerField(_(u'count'), default=0) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edge_counters') objects", "models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect( EdgeCounter.decrease_count, sender=Edge, dispatch_uid='decrease_edge_count' ) models.signals.pre_delete.connect( EdgeCleaner.clean_edges, dispatch_uid='clean_edges'", "= self.__class__._inverse_cache[self.db][et.id] except KeyError: eta = self.get(inverse=et) self._add_to_cache(self.db, eta) return eta def _add_to_cache(self,", "**kwargs): et = None if 'id' in kwargs: try: et = self.__class__._cache[self.db][kwargs['id']] except", "try: del self.__class__._cache.setdefault(using, {})[eta.id] 
del self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] del self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] except KeyError: pass", "et = self.__class__._cache[self.db][kwargs['id']] except KeyError: pass elif 'pk' in kwargs: try: et =", "et = self.__class__._cache[self.db][kwargs['name']] except KeyError: pass if et is None: et = super(EdgeTypeManager,", "edge metadata time = models.DateTimeField(_(u'time'), auto_now_add=True) site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edges') auto =", "SymmetricEdgeTypeAssociationManager.create_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='create_symmetric_edge_type_association' ) models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='delete_symmetric_edge_type_association' ) models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count'", ") @receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge') def pre_save_handler(instance, **kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode,", "import ugettext_lazy as _ from .fields import JSONField from .consistency_enforcers import * class", ") models.signals.post_delete.connect( SymmetricEdgeTypeAssociationManager.delete_symmetric_association, sender=EdgeTypeAssociation, dispatch_uid='delete_symmetric_edge_type_association' ) models.signals.post_save.connect( EdgeCounter.increase_count, sender=Edge, dispatch_uid='increase_edge_count' ) models.signals.post_delete.connect( EdgeCounter.decrease_count,", "eta = self.__class__._inverse_cache[self.db][et.id] except KeyError: eta = self.get(inverse=et) self._add_to_cache(self.db, eta) return eta def", "not instance.site_id: instance.site = getattr(instance.fromNode, 'site', Site.objects.get_current()) # CONNECT LISTENERS TO ENFORCE GRAPH", "return eta def get_for_direct_edge_type(self, 
et): try: eta = self.__class__._direct_cache[self.db][et.id] except KeyError: eta =", "over the place. _cache = {} _direct_cache = {} _inverse_cache = {} def", "@receiver(models.signals.pre_save, sender=EdgeCount, dispatch_uid='pre_save_edge_count') def pre_save_count_handler(instance, **kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode, 'site',", "self.get(inverse=et) self._add_to_cache(self.db, eta) return eta def _add_to_cache(self, using, eta): self.__class__._cache.setdefault(using, {})[eta.id] = eta", "@receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge') def pre_save_handler(instance, **kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode, 'site',", "['fromNode_type', 'fromNode_pk', 'toNode_type', 'toNode_pk', 'type', 'site'] ordering = ['-time'] def __unicode__(self): return (", "# Cache to avoid re-looking up EdgeTypeAssociation objects all over the place. _cache", "from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site from django.contrib.sites.managers import CurrentSiteManager from", "dispatch_uid='pre_save_edge_count') def pre_save_count_handler(instance, **kwargs): if not instance.site_id: instance.site = getattr(instance.fromNode, 'site', Site.objects.get_current()) #", "= self.__class__._direct_cache[self.db][et.id] except KeyError: eta = self.get(direct=et) self._add_to_cache(self.db, eta) return eta def get_for_inverse_edge_type(self,", "**kwargs): eta = None if 'id' in kwargs: try: eta = self.__class__._cache[self.db][kwargs['id']] except", "return ( _(u'%(from)s has %(count)d %(type)s edge(s)') % { 'from': self.fromNode if self.fromNode", "def delete(self, using=None): self.__class__.objects.rem_from_cache(using, self) super(EdgeTypeAssociation, self).delete(using) class Edge(models.Model): # fromNode field fromNode_type" ]
[ "\"\"\" def __init_subclass__(mcs, **kwargs): mcs._hash = None mcs.__args__ = None mcs.__origin__ = None", "know the attributes of the returned type after a __getitem__. \"\"\" __origin__ =", "**self.__dict__, '__args__': item, '__origin__': self, } bases = self, *self.__bases__ result = type(self.__name__,", "def __init_subclass__(mcs, **kwargs): mcs._hash = None mcs.__args__ = None mcs.__origin__ = None def", "= None mcs.__args__ = None mcs.__origin__ = None def __getitem__(self, item) -> _SubscribedType:", "become subscriptable. >>> class SomeType(metaclass=SubscriptableType): ... pass >>> SomeTypeSub = SomeType['some args'] >>>", "None __args__ = None class SubscriptableType(type): \"\"\" This metaclass will allow a type", "return self_args == other_args and self_origin == other_origin def __hash__(self): if not getattr(self,", "will allow a type to become subscriptable. >>> class SomeType(metaclass=SubscriptableType): ... pass >>>", "This metaclass will allow a type to become subscriptable. >>> class SomeType(metaclass=SubscriptableType): ...", "SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def __init_subclass__(mcs, **kwargs): mcs._hash = None mcs.__args__ = None mcs.__origin__", "other_origin def __hash__(self): if not getattr(self, '_hash', None): self._hash = hash('{}{}'.format(self.__origin__, self.__args__)) return", "bases, body) if hasattr(result, '_after_subscription'): # TODO check if _after_subscription is static result._after_subscription(item)", "the attributes of the returned type after a __getitem__. 
\"\"\" __origin__ = None", "bases = self, *self.__bases__ result = type(self.__name__, bases, body) if hasattr(result, '_after_subscription'): #", "static result._after_subscription(item) return result def __eq__(self, other): self_args = getattr(self, '__args__', None) self_origin", "} bases = self, *self.__bases__ result = type(self.__name__, bases, body) if hasattr(result, '_after_subscription'):", "result def __eq__(self, other): self_args = getattr(self, '__args__', None) self_origin = getattr(self, '__origin__',", "is static result._after_subscription(item) return result def __eq__(self, other): self_args = getattr(self, '__args__', None)", "SomeType['some args'] >>> SomeTypeSub.__args__ 'some args' >>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def __init_subclass__(mcs, **kwargs):", "to become subscriptable. >>> class SomeType(metaclass=SubscriptableType): ... pass >>> SomeTypeSub = SomeType['some args']", "getattr(self, '__origin__', None) other_args = getattr(other, '__args__', None) other_origin = getattr(other, '__origin__', None)", "other): self_args = getattr(self, '__args__', None) self_origin = getattr(self, '__origin__', None) other_args =", "SomeTypeSub = SomeType['some args'] >>> SomeTypeSub.__args__ 'some args' >>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def", "self_origin == other_origin def __hash__(self): if not getattr(self, '_hash', None): self._hash = hash('{}{}'.format(self.__origin__,", "'__origin__', None) return self_args == other_args and self_origin == other_origin def __hash__(self): if", "def __eq__(self, other): self_args = getattr(self, '__args__', None) self_origin = getattr(self, '__origin__', None)", "after a __getitem__. 
\"\"\" __origin__ = None __args__ = None class SubscriptableType(type): \"\"\"", "a placeholder to let the IDE know the attributes of the returned type", "mcs.__args__ = None mcs.__origin__ = None def __getitem__(self, item) -> _SubscribedType: body =", "# TODO check if _after_subscription is static result._after_subscription(item) return result def __eq__(self, other):", "'__args__': item, '__origin__': self, } bases = self, *self.__bases__ result = type(self.__name__, bases,", "None mcs.__origin__ = None def __getitem__(self, item) -> _SubscribedType: body = { **self.__dict__,", "__init_subclass__(mcs, **kwargs): mcs._hash = None mcs.__args__ = None mcs.__origin__ = None def __getitem__(self,", "other_args = getattr(other, '__args__', None) other_origin = getattr(other, '__origin__', None) return self_args ==", "= getattr(self, '__origin__', None) other_args = getattr(other, '__args__', None) other_origin = getattr(other, '__origin__',", "None) other_args = getattr(other, '__args__', None) other_origin = getattr(other, '__origin__', None) return self_args", "the returned type after a __getitem__. \"\"\" __origin__ = None __args__ = None", "None) other_origin = getattr(other, '__origin__', None) return self_args == other_args and self_origin ==", "result = type(self.__name__, bases, body) if hasattr(result, '_after_subscription'): # TODO check if _after_subscription", "{ **self.__dict__, '__args__': item, '__origin__': self, } bases = self, *self.__bases__ result =", "body) if hasattr(result, '_after_subscription'): # TODO check if _after_subscription is static result._after_subscription(item) return", "\"\"\" __origin__ = None __args__ = None class SubscriptableType(type): \"\"\" This metaclass will", "item) -> _SubscribedType: body = { **self.__dict__, '__args__': item, '__origin__': self, } bases", "mcs._hash = None mcs.__args__ = None mcs.__origin__ = None def __getitem__(self, item) ->", "returned type after a __getitem__. 
\"\"\" __origin__ = None __args__ = None class", "def __getitem__(self, item) -> _SubscribedType: body = { **self.__dict__, '__args__': item, '__origin__': self,", "the IDE know the attributes of the returned type after a __getitem__. \"\"\"", "a __getitem__. \"\"\" __origin__ = None __args__ = None class SubscriptableType(type): \"\"\" This", "args'] >>> SomeTypeSub.__args__ 'some args' >>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def __init_subclass__(mcs, **kwargs): mcs._hash", "and self_origin == other_origin def __hash__(self): if not getattr(self, '_hash', None): self._hash =", "self_args = getattr(self, '__args__', None) self_origin = getattr(self, '__origin__', None) other_args = getattr(other,", "SomeTypeSub.__args__ 'some args' >>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def __init_subclass__(mcs, **kwargs): mcs._hash = None", "type(self.__name__, bases, body) if hasattr(result, '_after_subscription'): # TODO check if _after_subscription is static", "= self, *self.__bases__ result = type(self.__name__, bases, body) if hasattr(result, '_after_subscription'): # TODO", "getattr(other, '__args__', None) other_origin = getattr(other, '__origin__', None) return self_args == other_args and", "_SubscribedType(type): \"\"\" This class is a placeholder to let the IDE know the", "-> _SubscribedType: body = { **self.__dict__, '__args__': item, '__origin__': self, } bases =", ">>> SomeTypeSub = SomeType['some args'] >>> SomeTypeSub.__args__ 'some args' >>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\"", "None) self_origin = getattr(self, '__origin__', None) other_args = getattr(other, '__args__', None) other_origin =", "def __hash__(self): if not getattr(self, '_hash', None): self._hash = hash('{}{}'.format(self.__origin__, self.__args__)) return self._hash", "None def __getitem__(self, item) -> _SubscribedType: body = { **self.__dict__, '__args__': item, '__origin__':", "= None __args__ = None class SubscriptableType(type): \"\"\" This 
metaclass will allow a", "other_args and self_origin == other_origin def __hash__(self): if not getattr(self, '_hash', None): self._hash", "result._after_subscription(item) return result def __eq__(self, other): self_args = getattr(self, '__args__', None) self_origin =", "SubscriptableType(type): \"\"\" This metaclass will allow a type to become subscriptable. >>> class", "= None class SubscriptableType(type): \"\"\" This metaclass will allow a type to become", "__origin__ = None __args__ = None class SubscriptableType(type): \"\"\" This metaclass will allow", "self_args == other_args and self_origin == other_origin def __hash__(self): if not getattr(self, '_hash',", "pass >>> SomeTypeSub = SomeType['some args'] >>> SomeTypeSub.__args__ 'some args' >>> SomeTypeSub.__origin__.__name__ 'SomeType'", "<reponame>georgeharker/typish<gh_stars>10-100 class _SubscribedType(type): \"\"\" This class is a placeholder to let the IDE", "'__args__', None) self_origin = getattr(self, '__origin__', None) other_args = getattr(other, '__args__', None) other_origin", "= type(self.__name__, bases, body) if hasattr(result, '_after_subscription'): # TODO check if _after_subscription is", "__args__ = None class SubscriptableType(type): \"\"\" This metaclass will allow a type to", "type to become subscriptable. >>> class SomeType(metaclass=SubscriptableType): ... pass >>> SomeTypeSub = SomeType['some", "class _SubscribedType(type): \"\"\" This class is a placeholder to let the IDE know", "SomeType(metaclass=SubscriptableType): ... pass >>> SomeTypeSub = SomeType['some args'] >>> SomeTypeSub.__args__ 'some args' >>>", "allow a type to become subscriptable. >>> class SomeType(metaclass=SubscriptableType): ... 
pass >>> SomeTypeSub", "= SomeType['some args'] >>> SomeTypeSub.__args__ 'some args' >>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def __init_subclass__(mcs,", "self, } bases = self, *self.__bases__ result = type(self.__name__, bases, body) if hasattr(result,", "'SomeType' \"\"\" def __init_subclass__(mcs, **kwargs): mcs._hash = None mcs.__args__ = None mcs.__origin__ =", "getattr(self, '__args__', None) self_origin = getattr(self, '__origin__', None) other_args = getattr(other, '__args__', None)", "a type to become subscriptable. >>> class SomeType(metaclass=SubscriptableType): ... pass >>> SomeTypeSub =", "metaclass will allow a type to become subscriptable. >>> class SomeType(metaclass=SubscriptableType): ... pass", "'some args' >>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def __init_subclass__(mcs, **kwargs): mcs._hash = None mcs.__args__", "= getattr(other, '__args__', None) other_origin = getattr(other, '__origin__', None) return self_args == other_args", "of the returned type after a __getitem__. \"\"\" __origin__ = None __args__ =", "= { **self.__dict__, '__args__': item, '__origin__': self, } bases = self, *self.__bases__ result", "__eq__(self, other): self_args = getattr(self, '__args__', None) self_origin = getattr(self, '__origin__', None) other_args", "if _after_subscription is static result._after_subscription(item) return result def __eq__(self, other): self_args = getattr(self,", "getattr(other, '__origin__', None) return self_args == other_args and self_origin == other_origin def __hash__(self):", "return result def __eq__(self, other): self_args = getattr(self, '__args__', None) self_origin = getattr(self,", "attributes of the returned type after a __getitem__. 
\"\"\" __origin__ = None __args__", "_SubscribedType: body = { **self.__dict__, '__args__': item, '__origin__': self, } bases = self,", ">>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def __init_subclass__(mcs, **kwargs): mcs._hash = None mcs.__args__ = None", ">>> SomeTypeSub.__args__ 'some args' >>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def __init_subclass__(mcs, **kwargs): mcs._hash =", "is a placeholder to let the IDE know the attributes of the returned", "= None def __getitem__(self, item) -> _SubscribedType: body = { **self.__dict__, '__args__': item,", "This class is a placeholder to let the IDE know the attributes of", "\"\"\" This metaclass will allow a type to become subscriptable. >>> class SomeType(metaclass=SubscriptableType):", "subscriptable. >>> class SomeType(metaclass=SubscriptableType): ... pass >>> SomeTypeSub = SomeType['some args'] >>> SomeTypeSub.__args__", ">>> class SomeType(metaclass=SubscriptableType): ... pass >>> SomeTypeSub = SomeType['some args'] >>> SomeTypeSub.__args__ 'some", "hasattr(result, '_after_subscription'): # TODO check if _after_subscription is static result._after_subscription(item) return result def", "**kwargs): mcs._hash = None mcs.__args__ = None mcs.__origin__ = None def __getitem__(self, item)", "None) return self_args == other_args and self_origin == other_origin def __hash__(self): if not", "class SomeType(metaclass=SubscriptableType): ... 
pass >>> SomeTypeSub = SomeType['some args'] >>> SomeTypeSub.__args__ 'some args'", "\"\"\" This class is a placeholder to let the IDE know the attributes", "'_after_subscription'): # TODO check if _after_subscription is static result._after_subscription(item) return result def __eq__(self,", "*self.__bases__ result = type(self.__name__, bases, body) if hasattr(result, '_after_subscription'): # TODO check if", "'__origin__': self, } bases = self, *self.__bases__ result = type(self.__name__, bases, body) if", "= None mcs.__origin__ = None def __getitem__(self, item) -> _SubscribedType: body = {", "let the IDE know the attributes of the returned type after a __getitem__.", "other_origin = getattr(other, '__origin__', None) return self_args == other_args and self_origin == other_origin", "TODO check if _after_subscription is static result._after_subscription(item) return result def __eq__(self, other): self_args", "if hasattr(result, '_after_subscription'): # TODO check if _after_subscription is static result._after_subscription(item) return result", "None class SubscriptableType(type): \"\"\" This metaclass will allow a type to become subscriptable.", "= getattr(other, '__origin__', None) return self_args == other_args and self_origin == other_origin def", "_after_subscription is static result._after_subscription(item) return result def __eq__(self, other): self_args = getattr(self, '__args__',", "'__origin__', None) other_args = getattr(other, '__args__', None) other_origin = getattr(other, '__origin__', None) return", "to let the IDE know the attributes of the returned type after a", "None mcs.__args__ = None mcs.__origin__ = None def __getitem__(self, item) -> _SubscribedType: body", "IDE know the attributes of the returned type after a __getitem__. 
\"\"\" __origin__", "'__args__', None) other_origin = getattr(other, '__origin__', None) return self_args == other_args and self_origin", "= getattr(self, '__args__', None) self_origin = getattr(self, '__origin__', None) other_args = getattr(other, '__args__',", "__getitem__(self, item) -> _SubscribedType: body = { **self.__dict__, '__args__': item, '__origin__': self, }", "placeholder to let the IDE know the attributes of the returned type after", "... pass >>> SomeTypeSub = SomeType['some args'] >>> SomeTypeSub.__args__ 'some args' >>> SomeTypeSub.__origin__.__name__", "args' >>> SomeTypeSub.__origin__.__name__ 'SomeType' \"\"\" def __init_subclass__(mcs, **kwargs): mcs._hash = None mcs.__args__ =", "class is a placeholder to let the IDE know the attributes of the", "class SubscriptableType(type): \"\"\" This metaclass will allow a type to become subscriptable. >>>", "type after a __getitem__. \"\"\" __origin__ = None __args__ = None class SubscriptableType(type):", "== other_args and self_origin == other_origin def __hash__(self): if not getattr(self, '_hash', None):", "== other_origin def __hash__(self): if not getattr(self, '_hash', None): self._hash = hash('{}{}'.format(self.__origin__, self.__args__))", "body = { **self.__dict__, '__args__': item, '__origin__': self, } bases = self, *self.__bases__", "check if _after_subscription is static result._after_subscription(item) return result def __eq__(self, other): self_args =", "item, '__origin__': self, } bases = self, *self.__bases__ result = type(self.__name__, bases, body)", "self_origin = getattr(self, '__origin__', None) other_args = getattr(other, '__args__', None) other_origin = getattr(other,", "self, *self.__bases__ result = type(self.__name__, bases, body) if hasattr(result, '_after_subscription'): # TODO check", "__getitem__. 
\"\"\" __origin__ = None __args__ = None class SubscriptableType(type): \"\"\" This metaclass", "mcs.__origin__ = None def __getitem__(self, item) -> _SubscribedType: body = { **self.__dict__, '__args__':" ]
[ "sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2 - out_channels :256, 28,506,873", "강제로 128 지정시 13,033,555 / 128 all 변경 : 9,465,555 \") # 분류를", "= torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64,", "= AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1,", "model def get_model_instance_segmentation(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model", "torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 256 anchor_generator =", "predictor with a new one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model def", "128 all 변경 : 9,465,555 \") # 분류를 위한 입력 특징 차원을 얻습니다", "torchvision.models.detection import FasterRCNN from torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn", "get_model_instance_segmentation2(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False,", "(15,000,000)\") # # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone #", "2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2", "model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 
#model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask", "<reponame>yeodongbin/2020AIChallengeCode<gh_stars>0 import torchvision from torchvision.models.detection.faster_rcnn import FastRCNNPredictor from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch", "= 1280 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler", "1 backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0,", "call2 - out_channels :1280, 19,540,921\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features", "get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256,", "from torchvision.models.detection import FasterRCNN from torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn import AnchorGenerator from", "print(\"densenet161 call2 - out_channels :256, 28,506,873 / 150M\") # 분류를 위한 입력 특징", "= model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)", "hidden_layer, # num_classes) return model def get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512", "학습된 헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer", "aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator,", "import EfficientNet from torchvision.models.detection import FasterRCNN from 
torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn import", "torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels", "# box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886\") # 분류를 위한 입력 특징", ": 43,702,739, resnet basicblock 3*3 -> 1*1 : 20,549,203 / basic : 20,543,571", "COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model", "torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2 - out_channels", "FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 # and replace the mask", "# num_classes) return model def get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512 anchor_generator", "위한 입력 특징 차원을 얻습니다 #in_features = backbone # 미리 학습된 헤더를 새로운", "FastRCNNPredictor from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch import EfficientNet from torchvision.models.detection import FasterRCNN", "4,862,777 / 19.5M\") print(\"squeezenet1_1 call2 - out_channels :256, 2,757,369 / 11M (15,000,000 /", "with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model", "128 # and replace the mask predictor with a new one model.roi_heads.mask_predictor =", "torchvision.models.squeezenet1_1(pretrained=False).features 
backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0,", "mask predictor with a new one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model", "/ 채널 : 강제로 128 지정시 13,033,555 / 128 all 변경 : 9,465,555", "torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler )", ": 20,549,203 / basic : 20,543,571 / basicblock con1 : 20,195,411 / 채널", "def get_model_instance_segmentation(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model =", "# num_classes) return model def get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리 학습된 인스턴스 분할", "AnchorGenerator from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model =", "resnet : 24,743,507 mobilenet : 87,366,291 squeezenet : 33,161,683 densnet : 43,702,739, resnet", "- out_channels :512, 4,808,441 / 19.2M (15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 33,192,463", "pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator =", "roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model = MaskRCNN(backbone,", "from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch import EfficientNet from torchvision.models.detection import FasterRCNN from", "= torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, 
pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280", "mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool =", "roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2", "= torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5,", "rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886\") # 분류를 위한 입력", "model def get_model_instance_segmentation5(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model", "- out_channels :1280, 19,540,921\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features =", "get_model_instance_segmentation(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False,", "from torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def", ": 22,468,758/ 19,333,398 / custom resent (64 쭉..) 
17,664,662\") return model def get_model_instance_segmentation0(num_classes):", "64, 128, 256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1,", "= AnchorGenerator(sizes=((32, 64, 128, 256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],", "1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model", "sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool", "return model def get_model_instance_segmentation5(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다", "# hidden_layer, # num_classes) return model def get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리 학습된", "torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 256 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256,", "28,506,873 / 150M\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone", "#print(\"squeezenet1_0 call2 - out_channels :516, 4,862,777 / 19.5M\") #print(\"squeezenet1_1 call2 - out_channels :516,", "torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes):", "num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886\") # 분류를", "512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler = 
torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14,", "num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2 - out_channels :256, 28,506,873 / 150M\") # 분류를", "def get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128,", "특징 차원을 얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다", "rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1 - resnet : 24,743,507 mobilenet : 87,366,291", "pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256,", "call - 41,755,286 / \") return model def get_model_instance_segmentation(num_classes): # COCO 에서 미리", "new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation6(num_classes):", "# and replace the mask predictor with a new one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,", "= FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and replace the", "torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 - out_channels", "9,465,555 \") # 분류를 위한 입력 특징 차원을 얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features #", "22,463,126 / 오잉..light resnet : 22,468,758/ 19,333,398 / custom resent (64 쭉..) 
17,664,662\")", "= 1 # and replace the mask predictor with a new one #model.roi_heads.mask_predictor", "COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model", "backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64,", "256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],", ": 87,366,291 squeezenet : 33,161,683 densnet : 43,702,739, resnet basicblock 3*3 -> 1*1", "= MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation4(num_classes): # COCO 에서", "AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2)", "#backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),", "out_channels :1280, 18,052,473 / 72M\") #print(\"squeezenet1_0 call2 - out_channels :516, 4,862,777 / 19.5M\")", "print(\"squeezenet1_1 call2 - out_channels :256, 2,757,369 / 11M (15,000,000 / 15,000,000)\") print(\"squeezenet1_1 call2", "것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 #", "# hidden_layer, # num_classes) return model def get_model_instance_segmentation5(num_classes): # COCO 에서 미리 학습된", "입력 특징 차원을 얻습니다 #in_features = backbone # 미리 학습된 헤더를 새로운 것으로", "custom call1 - resnet : 24,743,507 mobilenet : 87,366,291 squeezenet : 33,161,683 densnet", "hidden_layer, # num_classes) return model def 
get_model_instance_segmentation5(num_classes): # COCO 에서 미리 학습된 인스턴스", "특징 차원을 얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다", "바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 # and", "= 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),))", "/ 오잉..light resnet : 22,468,758/ 19,333,398 / custom resent (64 쭉..) 17,664,662\") return", "입력 특징 차원을 얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로", "import FasterRCNN from torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn import", "torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 512 anchor_generator =", "call2 - out_channels :512, 4,808,441 / 19.2M (15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512,", "# 분류를 위한 입력 특징 차원을 얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된", "print(\"squeezenet1_1 call2 - out_channels :512, 4,808,441 / 19.2M (15,000,000)\") print(\"squeezenet1_1 call2 - out_channels", "(resnet50) / 28,730,006 (resnet18) / 28,730,006 resnet / 22,463,126 / 오잉..light resnet :", "1 backbone.out_channels = 256 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0,", ":1280, 19,540,921\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone #", "#model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call - 41,401,661", "2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, 
# sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes,", "replace the mask predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer,", "call2 - out_channels :256, 28,506,873 / 150M\") # 분류를 위한 입력 특징 차원을", "20,549,203 / basic : 20,543,571 / basicblock con1 : 20,195,411 / 채널 :", "num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 # and replace the mask predictor", "torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels", "MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler ) #print(\"squeezenet1_0 call2 - out_channels :1280,", "#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels =", "특징 차원을 얻습니다 #in_features = backbone # 미리 학습된 헤더를 새로운 것으로 바꿉니다", "pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 1280 anchor_generator = AnchorGenerator(sizes=((32,", "#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.densenet161(pretrained=False).features #backbone.out_channels =", "= torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 512 anchor_generator 
= AnchorGenerator(sizes=((32, 64, 128,", "128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model =", "#backbone.out_channels = 1 backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),", "차원을 얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor", "num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 - out_channels :1280, 19,540,921\") # 분류를 위한 입력", "call2 - out_channels :1280, 18,052,473 / 72M\") #print(\"squeezenet1_0 call2 - out_channels :516, 4,862,777", "읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels", ": 9,465,555 \") # 분류를 위한 입력 특징 차원을 얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features", "22,468,758/ 19,333,398 / custom resent (64 쭉..) 
17,664,662\") return model def get_model_instance_segmentation0(num_classes): model", "= FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6 - out_channels :512, 4,808,441 / (15,000,000)", "학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)", "분류를 위한 입력 특징 차원을 얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를", "all 변경 : 9,465,555 \") # 분류를 위한 입력 특징 차원을 얻습니다 in_features", "87,366,291 squeezenet : 33,161,683 densnet : 43,702,739, resnet basicblock 3*3 -> 1*1 :", "에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model =", "학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)", "and replace the mask predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, #", "backbone # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask", "resnet : 22,468,758/ 19,333,398 / custom resent (64 쭉..) 
17,664,662\") return model def", "new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation_custom1(num_classes):", "2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161", "/ 150M\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone #", "#in_features = backbone # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features,", "model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 # and replace the mask predictor with a new", "squeezenet : 33,161,683 densnet : 43,702,739, resnet basicblock 3*3 -> 1*1 : 20,549,203", "con1 : 20,195,411 / 채널 : 강제로 128 지정시 13,033,555 / 128 all", "import MaskRCNN from torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import", "1*1 : 20,549,203 / basic : 20,543,571 / basicblock con1 : 20,195,411 /", "# sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom", "pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels =", "get_model_instance_segmentation4(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False,", "41,401,661 / 41,532,886\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features", "= fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn 
custom call - 41,755,286 (resnet50) / 28,730,006 (resnet18) /", "오잉..light resnet : 22,468,758/ 19,333,398 / custom resent (64 쭉..) 17,664,662\") return model", "custom resent (64 쭉..) 17,664,662\") return model def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)", "128 지정시 13,033,555 / 128 all 변경 : 9,465,555 \") # 분류를 위한", "4,849,849 4,862,777 / 19.5M\") print(\"squeezenet1_1 call2 - out_channels :256, 2,757,369 / 11M (15,000,000", "17,664,662\") return model def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call -", ":516, 4,862,777 / 19.5M\") #print(\"squeezenet1_1 call2 - out_channels :516, 4,849,849 4,862,777 / 19.5M\")", "model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask", "call1 - resnet : 24,743,507 mobilenet : 87,366,291 squeezenet : 33,161,683 densnet :", "fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom", "FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 - out_channels :1280, 19,540,921\") # 분류를 위한", "= torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes,", "print(\"squeezenet1_1 call2 - out_channels :512, 33,192,463 33,161,683 / 172M (15,000,000)\") # # 분류를", "MaskRCNNPredictor from efficientnet_pytorch import EfficientNet from torchvision.models.detection import 
FasterRCNN from torchvision.models.detection import MaskRCNN", "1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler", "FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and replace the mask", "2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6", "torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128,", "# rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1 - resnet : 24,743,507 mobilenet :", "19,540,921\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone # 미리", "# 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone # 미리 학습된", "pretrained_backbone=False) backbone = torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 256 anchor_generator = AnchorGenerator(sizes=((32,", "call2 - out_channels :516, 4,862,777 / 19.5M\") #print(\"squeezenet1_1 call2 - out_channels :516, 4,849,849", "= FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 # and replace the", "output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2 - out_channels :256,", "13,033,555 / 128 all 변경 : 9,465,555 \") # 분류를 위한 입력 특징", "torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = 
FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6 - out_channels", "3*3 -> 1*1 : 20,549,203 / basic : 20,543,571 / basicblock con1 :", "torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 1280 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256,", "# hidden_layer, # num_classes) return model def get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels =", "= 256 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler", "2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model =", ":256, 2,757,369 / 11M (15,000,000 / 15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 4,808,441", "#model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 1280", ":512, 4,808,441 / 19.2M (15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 33,192,463 33,161,683 /", "output_size=7, sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler,", "미리 학습된 인스턴스 분할 모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False,", "return model def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) 
print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286", "model def get_model_instance_segmentation2(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model", "pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels =", "one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation5(num_classes): #", "MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리", "24,743,507 mobilenet : 87,366,291 squeezenet : 33,161,683 densnet : 43,702,739, resnet basicblock 3*3", "num_classes) return model def get_model_instance_segmentation5(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을", "model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler ) #print(\"squeezenet1_0 call2 -", "sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler ) #print(\"squeezenet1_0 call2", "torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels", "def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 / \")", "= fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 / \") return model def", "- 41,401,661 / 41,532,886\") # 분류를 위한 입력 특징 차원을 얻습니다 
#in_features =", "= MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model def get_model_instance_segmentation2(num_classes): # COCO 에서 미리 학습된", "num_classes) return model def get_model_instance_segmentation4(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을", "and replace the mask predictor with a new one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer,", "/ custom resent (64 쭉..) 17,664,662\") return model def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False,", "- 41,755,286 / \") return model def get_model_instance_segmentation(num_classes): # COCO 에서 미리 학습된", "= torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator,", "rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 - out_channels :1280, 19,540,921\") # 분류를 위한 입력 특징", "얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor =", "미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels", "call2 - out_channels :256, 2,757,369 / 11M (15,000,000 / 15,000,000)\") print(\"squeezenet1_1 call2 -", "FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886\")", "num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and replace the mask predictor", "FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6 - out_channels :512, 4,808,441 / (15,000,000) \")", "64, 128, 256, 512),), aspect_ratios=((0.5, 
1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler", "읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.densenet161(pretrained=False).features #backbone.out_channels", "/ 22,463,126 / 오잉..light resnet : 22,468,758/ 19,333,398 / custom resent (64 쭉..)", "= torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 256 anchor_generator = AnchorGenerator(sizes=((32, 64, 128,", "= model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and replace the mask predictor with a", "pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 / \") return model def get_model_instance_segmentation(num_classes): #", "mask predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes)", "64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model", "11M (15,000,000 / 15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 4,808,441 / 19.2M (15,000,000)\")", "#hidden_layer = 1 # and replace the mask predictor with a new one", "= MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler ) #print(\"squeezenet1_0 call2 - out_channels", "sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call -", "새로운 것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128", 
"=roi_pooler, mask_roi_pool = mask_roi_pooler ) #print(\"squeezenet1_0 call2 - out_channels :1280, 18,052,473 / 72M\")", "out_channels :516, 4,862,777 / 19.5M\") #print(\"squeezenet1_1 call2 - out_channels :516, 4,849,849 4,862,777 /", "# # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone # 미리", "FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1 - resnet :", "return model def get_model_instance_segmentation(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다", "(64 쭉..) 17,664,662\") return model def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom", "hidden_layer, # num_classes) return model def get_model_instance_segmentation4(num_classes): # COCO 에서 미리 학습된 인스턴스", "import MaskRCNNPredictor from efficientnet_pytorch import EfficientNet from torchvision.models.detection import FasterRCNN from torchvision.models.detection import", "backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),))", "학습된 인스턴스 분할 모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)", "maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator", "model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 / \") return model", "19.2M (15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 33,192,463 33,161,683 / 172M (15,000,000)\") #", "#backbone.out_channels = 
1 backbone.out_channels = 256 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),", "model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2 - out_channels :256, 28,506,873 /", "모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.densenet161(pretrained=False).features", "= model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 # and replace the mask predictor with a", "/ 11M (15,000,000 / 15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 4,808,441 / 19.2M", "output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6 - out_channels :512,", "128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler =", "= torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6 -", "fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 / \") return model def get_model_instance_segmentation(num_classes):", "모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features", "/ 128 all 변경 : 9,465,555 \") # 분류를 위한 입력 특징 차원을", "헤더를 새로운 것으로 바꿉니다 
#model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer =", "#model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation4(num_classes): # COCO", "out_channels :516, 4,849,849 4,862,777 / 19.5M\") print(\"squeezenet1_1 call2 - out_channels :256, 2,757,369 /", "# COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)", "roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6", "41,532,886\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리", "19.5M\") #print(\"squeezenet1_1 call2 - out_channels :516, 4,849,849 4,862,777 / 19.5M\") print(\"squeezenet1_1 call2 -", "= torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 1280 anchor_generator = AnchorGenerator(sizes=((32, 64, 128,", "import AnchorGenerator from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model", "#model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation_custom1(num_classes): # COCO", "from torchvision.models.detection.faster_rcnn import FastRCNNPredictor from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch import EfficientNet from", "def get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, 
pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 (resnet50) /", "28,730,006 (resnet18) / 28,730,006 resnet / 22,463,126 / 오잉..light resnet : 22,468,758/ 19,333,398", "new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation5(num_classes):", "# sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call", "20,543,571 / basicblock con1 : 20,195,411 / 채널 : 강제로 128 지정시 13,033,555", "#print(\"squeezenet1_1 call2 - out_channels :516, 4,849,849 4,862,777 / 19.5M\") print(\"squeezenet1_1 call2 - out_channels", "차원을 얻습니다 #in_features = backbone # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor", "#model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1 -", "= torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 512 anchor_generator", "33,161,683 densnet : 43,702,739, resnet basicblock 3*3 -> 1*1 : 20,549,203 / basic", "pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator =", "torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch import EfficientNet from torchvision.models.detection import FasterRCNN from torchvision.models.detection", "# num_classes) return model def get_model_instance_segmentation5(num_classes): # COCO 에서 미리 학습된 인스턴스 분할", "maskrcnn_resnet50_fpn def 
get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 (resnet50)", "output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn", "= FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 - out_channels :1280, 19,540,921\") # 분류를", "output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 - out_channels :1280,", "mask_roi_pool = mask_roi_pooler ) #print(\"squeezenet1_0 call2 - out_channels :1280, 18,052,473 / 72M\") #print(\"squeezenet1_0", "FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2 - out_channels :256, 28,506,873 / 150M\") #", "num_classes) return model def get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512 anchor_generator =", "call - 41,755,286 (resnet50) / 28,730,006 (resnet18) / 28,730,006 resnet / 22,463,126 /", "import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286", "call2 - out_channels :516, 4,849,849 4,862,777 / 19.5M\") print(\"squeezenet1_1 call2 - out_channels :256,", "72M\") #print(\"squeezenet1_0 call2 - out_channels :516, 4,862,777 / 19.5M\") #print(\"squeezenet1_1 call2 - out_channels", "모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model = 
torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features", "= torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 256 anchor_generator", "/ 15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 4,808,441 / 19.2M (15,000,000)\") print(\"squeezenet1_1 call2", "box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1 - resnet : 24,743,507 mobilenet : 87,366,291 squeezenet :", "#anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler =", "with a new one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model def get_model_instance_segmentation2(num_classes):", "- out_channels :512, 33,192,463 33,161,683 / 172M (15,000,000)\") # # 분류를 위한 입력", "AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2)", "rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6 - out_channels :512, 4,808,441 / (15,000,000) \") return model", "num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6 - out_channels :512, 4,808,441 / (15,000,000) \") return", "one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation4(num_classes): #", "backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),", "MaskRCNN from 
torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn", "COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model", "torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256,", "get_model_instance_segmentation5(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False,", "model def get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64,", "EfficientNet from torchvision.models.detection import FasterRCNN from torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn import AnchorGenerator", "2,757,369 / 11M (15,000,000 / 15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 4,808,441 /", "- resnet : 24,743,507 mobilenet : 87,366,291 squeezenet : 33,161,683 densnet : 43,702,739,", "- out_channels :1280, 18,052,473 / 72M\") #print(\"squeezenet1_0 call2 - out_channels :516, 4,862,777 /", "return model def get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32,", "sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1", "512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler =", "replace the mask predictor with a new one model.roi_heads.mask_predictor = 
MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)", "from torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from", "model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model def get_model_instance_segmentation2(num_classes): # COCO 에서 미리", "resent (64 쭉..) 17,664,662\") return model def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn", "num_classes) return model def get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을", "return model def get_model_instance_segmentation2(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다", "num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1 - resnet : 24,743,507 mobilenet", "hidden_layer, # num_classes) return model def get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리 학습된 인스턴스", "one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model def get_model_instance_segmentation2(num_classes): # COCO 에서", "256 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler =", "FasterRCNN from torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn", "/ 41,532,886\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features #", "model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 - out_channels :1280, 19,540,921\") #", "입력 특징 차원을 얻습니다 #in_features = 
model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로", "것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 #", "#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels =", "= torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), #", "hidden_layer, num_classes) return model def get_model_instance_segmentation2(num_classes): # COCO 에서 미리 학습된 인스턴스 분할", "def get_model_instance_segmentation2(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model =", "256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2)", "# hidden_layer, # num_classes) return model def get_model_instance_segmentation4(num_classes): # COCO 에서 미리 학습된", "aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone,", "#in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features,", "33,192,463 33,161,683 / 172M (15,000,000)\") # # 분류를 위한 입력 특징 차원을 얻습니다", "torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), # aspect_ratios=((0.5,", "model def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 
41,755,286 /", "/ 19.2M (15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 33,192,463 33,161,683 / 172M (15,000,000)\")", "# rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886\") # 분류를 위한", "15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 4,808,441 / 19.2M (15,000,000)\") print(\"squeezenet1_1 call2 -", "- out_channels :256, 28,506,873 / 150M\") # 분류를 위한 입력 특징 차원을 얻습니다", "basicblock 3*3 -> 1*1 : 20,549,203 / basic : 20,543,571 / basicblock con1", "backbone.out_channels = 1280 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),))", "512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2) #model", "분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone # 미리 학습된 헤더를", "= backbone # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)", "= 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler", "4,808,441 / 19.2M (15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 33,192,463 33,161,683 / 172M", "model def get_model_instance_segmentation4(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model", "model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32,", "def get_model_instance_segmentation5(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model =", "읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = 
torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels", "1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone, #", ":1280, 18,052,473 / 72M\") #print(\"squeezenet1_0 call2 - out_channels :516, 4,862,777 / 19.5M\") #print(\"squeezenet1_1", "#backbone.out_channels = 1 backbone.out_channels = 1280 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),", "41,755,286 (resnet50) / 28,730,006 (resnet18) / 28,730,006 resnet / 22,463,126 / 오잉..light resnet", "19.5M\") print(\"squeezenet1_1 call2 - out_channels :256, 2,757,369 / 11M (15,000,000 / 15,000,000)\") print(\"squeezenet1_1", "150M\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone # 미리", "인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone", "얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor =", "sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6 - out_channels :512, 4,808,441", "= torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2 -", "the mask predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, #", "from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False,", "모델을 읽어옵니다 #model = 
torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features", "/ 172M (15,000,000)\") # # 분류를 위한 입력 특징 차원을 얻습니다 #in_features =", "anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],", "# COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)", "= 1 backbone.out_channels = 1280 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5,", "43,702,739, resnet basicblock 3*3 -> 1*1 : 20,549,203 / basic : 20,543,571 /", "model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 # and replace", "out_channels :256, 28,506,873 / 150M\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features", "torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, #", "rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2 - out_channels :256, 28,506,873 / 150M\") # 분류를 위한", "print(\"maskrcnn_resnet50_fpn custom call1 - resnet : 24,743,507 mobilenet : 87,366,291 squeezenet : 33,161,683", "\") # 분류를 위한 입력 특징 차원을 얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리", "33,161,683 / 172M (15,000,000)\") # # 분류를 위한 입력 특징 차원을 얻습니다 #in_features", "# num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886\") #", "call2 - out_channels :512, 33,192,463 33,161,683 / 172M (15,000,000)\") # # 분류를 위한", "분할 모델을 읽어옵니다 
#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone =", "바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and", "AnchorGenerator(sizes=((32, 64, 128, 256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], #", "얻습니다 #in_features = backbone # 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor =", "custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call -", "from efficientnet_pytorch import EfficientNet from torchvision.models.detection import FasterRCNN from torchvision.models.detection import MaskRCNN from", "a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def", "= MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation_custom1(num_classes): # COCO 에서", "# output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler)", "= torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1", "# num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1 - resnet : 24,743,507", "1.0, 2.0),)) roi_pooler = 
torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler)", "에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model =", "print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 / \") return model def get_model_instance_segmentation(num_classes): # COCO", "172M (15,000,000)\") # # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = backbone", "# box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1 - resnet : 24,743,507 mobilenet : 87,366,291 squeezenet", "분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone =", "헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer =", "hidden_layer = 128 # and replace the mask predictor with a new one", "resnet basicblock 3*3 -> 1*1 : 20,549,203 / basic : 20,543,571 / basicblock", "# aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2) #model =", "new one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model def get_model_instance_segmentation2(num_classes): # COCO", "MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features backbone.out_channels", "# 미리 학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask =", "out_channels :1280, 19,540,921\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features = 
backbone", "roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2", "차원을 얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor", "= mask_roi_pooler ) #print(\"squeezenet1_0 call2 - out_channels :1280, 18,052,473 / 72M\") #print(\"squeezenet1_0 call2", "/ 19.5M\") print(\"squeezenet1_1 call2 - out_channels :256, 2,757,369 / 11M (15,000,000 / 15,000,000)\")", "def get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 model =", "28,730,006 resnet / 22,463,126 / 오잉..light resnet : 22,468,758/ 19,333,398 / custom resent", "backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 1280 anchor_generator = AnchorGenerator(sizes=((32, 64,", "= torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 -", "= FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"maskrcnn_resnet50_fpn custom call1 - resnet", "- out_channels :516, 4,862,777 / 19.5M\") #print(\"squeezenet1_1 call2 - out_channels :516, 4,849,849 4,862,777", "MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation4(num_classes): # COCO 에서 미리", "/ 72M\") #print(\"squeezenet1_0 call2 - out_channels :516, 4,862,777 / 19.5M\") #print(\"squeezenet1_1 call2 -", "torchvision from torchvision.models.detection.faster_rcnn import FastRCNNPredictor from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch import EfficientNet", "읽어옵니다 
#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels", "# and replace the mask predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,", "pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32,", "#model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 512", "= 1 backbone.out_channels = 512 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5,", "학습된 헤더를 새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer", "\") return model def get_model_instance_segmentation(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을", "torchvision.models.detection.faster_rcnn import FastRCNNPredictor from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch import EfficientNet from torchvision.models.detection", "미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False,", "#backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), # aspect_ratios=((0.5, 1.0,", "torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn import AnchorGenerator from custom_model.faster_rcnn import 
fasterrcnn_resnet50_fpn from custom_model.mask_rcnn", "/ 28,730,006 (resnet18) / 28,730,006 resnet / 22,463,126 / 오잉..light resnet : 22,468,758/", "print(\"fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886\") # 분류를 위한 입력 특징 차원을 얻습니다", "쭉..) 17,664,662\") return model def get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call", "efficientnet_pytorch import EfficientNet from torchvision.models.detection import FasterRCNN from torchvision.models.detection import MaskRCNN from torchvision.models.detection.rpn", "mask_roi_pooler ) #print(\"squeezenet1_0 call2 - out_channels :1280, 18,052,473 / 72M\") #print(\"squeezenet1_0 call2 -", "custom call - 41,755,286 / \") return model def get_model_instance_segmentation(num_classes): # COCO 에서", "box_roi_pool=roi_pooler) print(\"densenet161 call2 - out_channels :256, 28,506,873 / 150M\") # 분류를 위한 입력", "predictor with a new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return", "basic : 20,543,571 / basicblock con1 : 20,195,411 / 채널 : 강제로 128", ":516, 4,849,849 4,862,777 / 19.5M\") print(\"squeezenet1_1 call2 - out_channels :256, 2,757,369 / 11M", "인스턴스 분할 모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone", "box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886\") # 분류를 위한 입력 특징 차원을", "# 미리 학습된 헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask =", "print(\"mobilenet_v2 call2 - out_channels :1280, 19,540,921\") # 분류를 위한 입력 특징 차원을 얻습니다", "pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = 
torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels =", "in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features,", "= MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation6(num_classes): backbone = torchvision.models.squeezenet1_1(pretrained=False).features", "MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation5(num_classes): # COCO 에서 미리", "the mask predictor with a new one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return", "= maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280", "model def get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 model", "/ basicblock con1 : 20,195,411 / 채널 : 강제로 128 지정시 13,033,555 /", "return model def get_model_instance_segmentation4(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다", "(15,000,000 / 15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 4,808,441 / 19.2M (15,000,000)\") print(\"squeezenet1_1", ":512, 33,192,463 33,161,683 / 172M (15,000,000)\") # # 분류를 위한 입력 특징 차원을", "#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels =", "densnet : 43,702,739, resnet basicblock 3*3 -> 1*1 : 20,549,203 / basic :", "one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # 
hidden_layer, # num_classes) return model def get_model_instance_segmentation6(num_classes): backbone", "- 41,755,286 (resnet50) / 28,730,006 (resnet18) / 28,730,006 resnet / 22,463,126 / 오잉..light", "num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler ) #print(\"squeezenet1_0 call2 - out_channels :1280, 18,052,473", "128, 256, 512),), # aspect_ratios=((0.5, 1.0, 2.0),)) #roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, #", "41,755,286 / \") return model def get_model_instance_segmentation(num_classes): # COCO 에서 미리 학습된 인스턴스", "채널 : 강제로 128 지정시 13,033,555 / 128 all 변경 : 9,465,555 \")", "지정시 13,033,555 / 128 all 변경 : 9,465,555 \") # 분류를 위한 입력", "call - 41,401,661 / 41,532,886\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features", "= AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7,", "out_channels :512, 4,808,441 / 19.2M (15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 33,192,463 33,161,683", "MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model def get_model_instance_segmentation2(num_classes): # COCO 에서 미리 학습된 인스턴스", "1 backbone.out_channels = 1280 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0,", "모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.squeezenet1_1(pretrained=False).features", "custom_model.faster_rcnn import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)", "model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and 
replace the mask predictor with a new", "return model def get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다", "미리 학습된 헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels", "out_channels :512, 33,192,463 33,161,683 / 172M (15,000,000)\") # # 분류를 위한 입력 특징", "get_model_instance_segmentation_custom1(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False,", "= FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"densenet161 call2 - out_channels :256, 28,506,873 / 150M\")", "= 128 # and replace the mask predictor with a new one model.roi_heads.mask_predictor", "512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes,", "pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 (resnet50) / 28,730,006 (resnet18) / 28,730,006 resnet", "a new one model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes) return model def get_model_instance_segmentation2(num_classes): #", "# 분류를 위한 입력 특징 차원을 얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된", "/ \") return model def get_model_instance_segmentation(num_classes): # COCO 에서 미리 학습된 인스턴스 분할", ": 강제로 128 지정시 13,033,555 / 128 all 변경 : 9,465,555 \") #", "19,333,398 / custom resent (64 쭉..) 
17,664,662\") return model def get_model_instance_segmentation0(num_classes): model =", "1280 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler =", "1 # and replace the mask predictor with a new one #model.roi_heads.mask_predictor =", "one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation_custom1(num_classes): #", "box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler ) #print(\"squeezenet1_0 call2 - out_channels :1280, 18,052,473 /", "(resnet18) / 28,730,006 resnet / 22,463,126 / 오잉..light resnet : 22,468,758/ 19,333,398 /", "분할 모델을 읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone =", "= model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운 것으로 바꿉니다 model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)", "sampling_ratio=2) model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 - out_channels :1280, 19,540,921\")", "위한 입력 특징 차원을 얻습니다 #in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운", "output_size=14, sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler ) #print(\"squeezenet1_0", "위한 입력 특징 차원을 얻습니다 in_features = model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를 새로운", "/ basic : 20,543,571 / basicblock con1 : 20,195,411 / 채널 : 강제로", "out_channels :256, 2,757,369 / 11M (15,000,000 / 15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512,", ": 24,743,507 mobilenet : 87,366,291 squeezenet : 33,161,683 densnet : 43,702,739, resnet basicblock", "분류를 위한 입력 특징 차원을 얻습니다 in_features = 
model.roi_heads.box_predictor.cls_score.in_features # 미리 학습된 헤더를", "import fasterrcnn_resnet50_fpn from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn", "= 1 backbone.out_channels = 256 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5,", "4,862,777 / 19.5M\") #print(\"squeezenet1_1 call2 - out_channels :516, 4,849,849 4,862,777 / 19.5M\") print(\"squeezenet1_1", "인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone", "#model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1 # and replace", "새로운 것으로 바꿉니다 #model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes) #in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels #hidden_layer = 1", "256, 512),), aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=1, sampling_ratio=2) model = FasterRCNN(backbone,", "#model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator = AnchorGenerator(sizes=((32,", "= torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 1280 anchor_generator", "#model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = 
torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 256", "(15,000,000)\") print(\"squeezenet1_1 call2 - out_channels :512, 33,192,463 33,161,683 / 172M (15,000,000)\") # #", "# num_classes) return model def get_model_instance_segmentation4(num_classes): # COCO 에서 미리 학습된 인스턴스 분할", "torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool", "model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 (resnet50) / 28,730,006 (resnet18)", "= torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1", "backbone.out_channels = 256 anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),))", "output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn", "def get_model_instance_segmentation4(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model =", ": 20,543,571 / basicblock con1 : 20,195,411 / 채널 : 강제로 128 지정시", "backbone = torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1 backbone.out_channels = 256 anchor_generator = AnchorGenerator(sizes=((32, 64,", "20,195,411 / 채널 : 강제로 128 지정시 13,033,555 / 128 all 변경 :", "torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.mobilenet_v2(pretrained=False).features 
#backbone.out_channels = 1 backbone.out_channels = 1280 anchor_generator =", "new one #model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation4(num_classes):", "= torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2) model = MaskRCNN(backbone, num_classes=num_classes, box_roi_pool =roi_pooler, mask_roi_pool = mask_roi_pooler", ": 33,161,683 densnet : 43,702,739, resnet basicblock 3*3 -> 1*1 : 20,549,203 /", "fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 (resnet50) / 28,730,006 (resnet18) / 28,730,006", "변경 : 9,465,555 \") # 분류를 위한 입력 특징 차원을 얻습니다 in_features =", "num_classes) return model def get_model_instance_segmentation2(num_classes): # COCO 에서 미리 학습된 인스턴스 분할 모델을", "from custom_model.mask_rcnn import maskrcnn_resnet50_fpn def get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call", "- out_channels :516, 4,849,849 4,862,777 / 19.5M\") print(\"squeezenet1_1 call2 - out_channels :256, 2,757,369", "in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels hidden_layer = 128 # and replace the mask predictor with", "/ 28,730,006 resnet / 22,463,126 / 오잉..light resnet : 22,468,758/ 19,333,398 / custom", "#print(\"squeezenet1_0 call2 - out_channels :1280, 18,052,473 / 72M\") #print(\"squeezenet1_0 call2 - out_channels :516,", ": 20,195,411 / 채널 : 강제로 128 지정시 13,033,555 / 128 all 변경", "18,052,473 / 72M\") #print(\"squeezenet1_0 call2 - out_channels :516, 4,862,777 / 19.5M\") #print(\"squeezenet1_1 call2", "-> 1*1 : 20,549,203 / basic : 20,543,571 / basicblock con1 : 20,195,411", "/ 19.5M\") #print(\"squeezenet1_1 call2 - out_channels :516, 4,849,849 4,862,777 / 19.5M\") print(\"squeezenet1_1 call2", "resnet / 22,463,126 / 오잉..light 
resnet : 22,468,758/ 19,333,398 / custom resent (64", "= FasterRCNN(backbone, # num_classes=num_classes, # rpn_anchor_generator=anchor_generator, # box_roi_pool=roi_pooler) print(\"fasterrcnn_resnet50_fpn call - 41,401,661 /", "aspect_ratios=((0.5, 1.0, 2.0),)) roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=7, sampling_ratio=2) mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], output_size=14, sampling_ratio=2)", "import torchvision from torchvision.models.detection.faster_rcnn import FastRCNNPredictor from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch import", "get_model_instance_segmentation_custom0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 (resnet50) / 28,730,006", "#roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], # output_size=1, # sampling_ratio=2) #model = FasterRCNN(backbone, # num_classes=num_classes, #", "에서 미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model =", "읽어옵니다 model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels", "box_roi_pool=roi_pooler) print(\"mobilenet_v2 call2 - out_channels :1280, 19,540,921\") # 분류를 위한 입력 특징 차원을", "#model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation6(num_classes): backbone =", "= MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation5(num_classes): # COCO 에서", "#in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels 
#hidden_layer = 1 # and replace the mask predictor with", "torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels = 1280 #anchor_generator", "#model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, # hidden_layer, # num_classes) return model def get_model_instance_segmentation5(num_classes): # COCO", "model = FasterRCNN(backbone, num_classes=num_classes, rpn_anchor_generator=anchor_generator, box_roi_pool=roi_pooler) print(\"get_model_instance_segmentation6 call6 - out_channels :512, 4,808,441 /", "print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 (resnet50) / 28,730,006 (resnet18) / 28,730,006 resnet /", "= torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) backbone = torchvision.models.densenet161(pretrained=False).features #backbone.out_channels = 1", "model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) #backbone = torchvision.models.mobilenet_v2(pretrained=False).features #backbone.out_channels =", ":256, 28,506,873 / 150M\") # 분류를 위한 입력 특징 차원을 얻습니다 #in_features =", ") #print(\"squeezenet1_0 call2 - out_channels :1280, 18,052,473 / 72M\") #print(\"squeezenet1_0 call2 - out_channels", "mobilenet : 87,366,291 squeezenet : 33,161,683 densnet : 43,702,739, resnet basicblock 3*3 ->", "import FastRCNNPredictor from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor from efficientnet_pytorch import EfficientNet from torchvision.models.detection import", "미리 학습된 인스턴스 분할 모델을 읽어옵니다 #model = 
torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False,", "get_model_instance_segmentation0(num_classes): model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False) print(\"fasterrcnn_resnet50_fpn custom call - 41,755,286 / \") return", "custom call - 41,755,286 (resnet50) / 28,730,006 (resnet18) / 28,730,006 resnet / 22,463,126", "- out_channels :256, 2,757,369 / 11M (15,000,000 / 15,000,000)\") print(\"squeezenet1_1 call2 - out_channels", "basicblock con1 : 20,195,411 / 채널 : 강제로 128 지정시 13,033,555 / 128" ]
[]
[ "GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords): lat, lon =", "locations if type(inputs) is str: inputs = [inputs] if len(inputs) == 0: raise", "city if suburb: params[\"suburb\"] = suburb if street: params[\"street\"] = street if house_number:", "= {\"key\": self.key} if location: params[\"location\"] = location requests_method = self.session.get url =", "json.dumps(inputs) post_data = list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url) def geocode_batch(self, locations:", "lon = line_of_coords.split(',') return dict(lat=lat, lon=lon) def geocode(self, location=None, country=None, city=None, suburb=None, street=None,", "zip=None, admin_level_1=None): params = {\"key\": self.key} if location: params[\"location\"] = location if country:", "\"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode(self, location=None): params = {\"key\": self.key} if location:", "self.__make_coords_dict_helper(line_of_coords), inputs)) json_string = json.dumps(inputs) post_data = list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data,", "REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class Client(object): def __init__(self, key=None, region='eu', custom_url=None, version='v0'): if not", "location=None): params = {\"key\": self.key} if location: params[\"location\"] = location requests_method = self.session.get", "= [inputs] if len(inputs) == 0: raise ValueError(\"Param locations has to contain some", "self.key = key @staticmethod def __to_inputs_data(input): if type(input) is str: return Input(input) return", "less than 10000.\") if ',' not in inputs[0]: raise ValueError(\"No comma delimiter found,", "r 
= requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'}) results_url = r.headers.get('location') r = requests.get(results_url)", "[inputs] if len(inputs) == 0: raise ValueError(\"Param locations has to contain some items.\")", "10000: raise ValueError(\"Param locations has to be less than 10000.\") inputs_data = list(map(self.__to_inputs_data,", "params=params) body = response.json() api_status = body[\"status\"] if api_status == \"OK\" or api_status", "requests.get(results_url) while True: retry_after = r.headers.get('retry-after') if retry_after is not None: time.sleep(int(retry_after)) r", "self.key} if location: params[\"location\"] = location requests_method = self.session.get url = self.__get_services_url_reverse_geocode() response", "street=None, house_number=None, zip=None, admin_level_1=None): params = {\"key\": self.key} if location: params[\"location\"] = location", "= self.session.get url = self.__get_services_url_geocode() response = requests_method(url, params=params) body = response.json() api_status", "requests.Session() self.key = key @staticmethod def __to_inputs_data(input): if type(input) is str: return Input(input)", "requests_method(url, params=params) body = response.json() api_status = body[\"status\"] if api_status == \"OK\" or", "= suburb if street: params[\"street\"] = street if house_number: params[\"house_number\"] = house_number if", "inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string = json.dumps(inputs) post_data = list(json.loads(json_string)) services_url", "import requests from sygicmaps.input import Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH", "inputs = locations if type(inputs) is str: inputs = [inputs] if len(inputs) ==", "x: x.__dict__) post_data = list(json.loads(json_string)) post_data = list(map(self.__remove_nulls, post_data)) services_url = 
self.__get_services_url_geocode_batch() return", "admin_level_1 requests_method = self.session.get url = self.__get_services_url_geocode() response = requests_method(url, params=params) body =", "REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords): lat, lon = line_of_coords.split(',') return dict(lat=lat, lon=lon) def geocode(self,", "not None} def __get_services_url_geocode(self): return self.services_url + GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return self.services_url +", "params[\"suburb\"] = suburb if street: params[\"street\"] = street if house_number: params[\"house_number\"] = house_number", "= requests.Session() self.key = key @staticmethod def __to_inputs_data(input): if type(input) is str: return", "r.headers.get('location') r = requests.get(results_url) while True: retry_after = r.headers.get('retry-after') if retry_after is not", "def __get_services_url_geocode_batch(self): return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod", "\"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode(self, location=None): params =", "json import time import requests from sygicmaps.input import Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH", "not in inputs[0]: raise ValueError(\"No comma delimiter found, please verify that location input", "self.version = version self.session = requests.Session() self.key = key @staticmethod def __to_inputs_data(input): if", "params[\"house_number\"] = house_number if zip: params[\"zip\"] = zip if admin_level_1: params[\"admin_level_1\"] = admin_level_1", "if len(inputs) == 0: raise ValueError(\"Param locations has to contain some items.\") if", "r.headers.get('retry-after') if retry_after is not None: 
time.sleep(int(retry_after)) r = requests.get(results_url) continue break body", "items.\") if len(inputs) >= 10000: raise ValueError(\"Param locations has to be less than", "v is not None} def __get_services_url_geocode(self): return self.services_url + GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return", "inputs)) json_string = json.dumps(inputs) post_data = list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url)", "response = requests_method(url, params=params) body = response.json() api_status = body[\"status\"] if api_status ==", "@staticmethod def __to_inputs_data(input): if type(input) is str: return Input(input) return input @staticmethod def", "raise ValueError(\"Param locations has to contain some items.\") if len(inputs) >= 10000: raise", "json.dumps(inputs_data, default=lambda x: x.__dict__) post_data = list(json.loads(json_string)) post_data = list(map(self.__remove_nulls, post_data)) services_url =", "not None: time.sleep(int(retry_after)) r = requests.get(results_url) continue break body = r.json() api_status =", "requests_method = self.session.get url = self.__get_services_url_reverse_geocode() response = requests_method(url, params=params) body = response.json()", "self.__geocode_batch_base(post_data, services_url) def geocode_batch(self, locations: list): inputs = locations if type(inputs) is str:", "api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode_batch(self, locations: list): inputs = locations", "v for k, v in d.items() if v is not None} def __get_services_url_geocode(self):", "str: return Input(input) return input @staticmethod def __remove_nulls(d): return {k: v for k,", "def __get_services_url_reverse_geocode_batch(self): return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return self.services_url + 
GEOCODE_BATCH_URL_PATH.format(self.version) def", "house_number: params[\"house_number\"] = house_number if zip: params[\"zip\"] = zip if admin_level_1: params[\"admin_level_1\"] =", "self.__get_services_url_reverse_geocode() response = requests_method(url, params=params) body = response.json() api_status = body[\"status\"] if api_status", "url = self.__get_services_url_geocode() response = requests_method(url, params=params) body = response.json() api_status = body[\"status\"]", "== \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def __geocode_batch_base(self, post_data, services_url):", "= admin_level_1 requests_method = self.session.get url = self.__get_services_url_geocode() response = requests_method(url, params=params) body", "',' not in inputs[0]: raise ValueError(\"No comma delimiter found, please verify that location", "Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\"", "= json.dumps(post_data) r = requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'}) results_url = r.headers.get('location') r", "if api_status == \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode(self,", "__get_services_url_reverse_geocode(self): return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords): lat, lon = line_of_coords.split(',') return", "= list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string = json.dumps(inputs) post_data = list(json.loads(json_string)) services_url =", "json.dumps(post_data) r = requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'}) results_url = r.headers.get('location') r =", "post_data = list(json.loads(json_string)) post_data = 
list(map(self.__remove_nulls, post_data)) services_url = self.__get_services_url_geocode_batch() return self.__geocode_batch_base(post_data, services_url)", "= self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url) def geocode_batch(self, locations: list): inputs = locations if", "contain some items.\") if len(inputs) >= 10000: raise ValueError(\"Param locations has to be", "def __to_inputs_data(input): if type(input) is str: return Input(input) return input @staticmethod def __remove_nulls(d):", "params[\"zip\"] = zip if admin_level_1: params[\"admin_level_1\"] = admin_level_1 requests_method = self.session.get url =", "to contain some items.\") if len(inputs) >= 10000: raise ValueError(\"Param locations has to", "10000: raise ValueError(\"Param locations has to be less than 10000.\") if ',' not", "if type(input) is str: return Input(input) return input @staticmethod def __remove_nulls(d): return {k:", "input format is list of LAT,LON\") inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string", "def geocode_batch(self, locations: list): inputs = locations if type(inputs) is str: inputs =", "key is not set.\") self.custom_url = custom_url self.services_url = SERVICES_URL.format(region) self.version = version", "list): inputs = locations if type(inputs) is str: inputs = [inputs] if len(inputs)", "raise ValueError(\"No comma delimiter found, please verify that location input format is list", "= json.dumps(inputs) post_data = list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url) def geocode_batch(self,", "of LAT,LON\") inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string = json.dumps(inputs) post_data =", "self.services_url = SERVICES_URL.format(region) self.version = version self.session = requests.Session() 
self.key = key @staticmethod", "import Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH =", "or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode(self, location=None): params = {\"key\":", "\"NO_RESULTS\": return body.get(\"results\", []) def __geocode_batch_base(self, post_data, services_url): url = services_url params =", "= r.json() api_status = body[\"status\"] if api_status == \"OK\" or api_status == \"NO_RESULTS\":", "self.key} post_body = json.dumps(post_data) r = requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'}) results_url =", "0: raise ValueError(\"Param locations has to contain some items.\") if len(inputs) >= 10000:", "__get_services_url_geocode_batch(self): return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def", "= \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class Client(object): def __init__(self, key=None,", "[]) def __geocode_batch_base(self, post_data, services_url): url = services_url params = {\"key\": self.key} post_body", "self.session.get url = self.__get_services_url_reverse_geocode() response = requests_method(url, params=params) body = response.json() api_status =", "post_data, services_url): url = services_url params = {\"key\": self.key} post_body = json.dumps(post_data) r", "list of LAT,LON\") inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string = json.dumps(inputs) post_data", "requests_method = self.session.get url = self.__get_services_url_geocode() response = requests_method(url, params=params) body = response.json()", 
"version='v0'): if not key: raise ValueError(\"API key is not set.\") self.custom_url = custom_url", "len(inputs) >= 10000: raise ValueError(\"Param locations has to be less than 10000.\") if", "+ REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords): lat, lon = line_of_coords.split(',') return dict(lat=lat, lon=lon) def", "requests.get(results_url) continue break body = r.json() api_status = body[\"status\"] if api_status == \"OK\"", "True: retry_after = r.headers.get('retry-after') if retry_after is not None: time.sleep(int(retry_after)) r = requests.get(results_url)", "== 0: raise ValueError(\"Param locations has to contain some items.\") if len(inputs) >=", "location=None, country=None, city=None, suburb=None, street=None, house_number=None, zip=None, admin_level_1=None): params = {\"key\": self.key} if", "body = response.json() api_status = body[\"status\"] if api_status == \"OK\" or api_status ==", "Input(input) return input @staticmethod def __remove_nulls(d): return {k: v for k, v in", "= requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'}) results_url = r.headers.get('location') r = requests.get(results_url) while", "locations: list): inputs = locations if type(inputs) is str: inputs = [inputs] if", "= requests.get(results_url) while True: retry_after = r.headers.get('retry-after') if retry_after is not None: time.sleep(int(retry_after))", "api_status = body[\"status\"] if api_status == \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\",", "def reverse_geocode_batch(self, locations: list): inputs = locations if type(inputs) is str: inputs =", "+ REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return self.services_url +", "services_url params = {\"key\": self.key} post_body = 
json.dumps(post_data) r = requests.post(url, data=post_body, params=params,", "please verify that location input format is list of LAT,LON\") inputs = list(map(lambda", "sygicmaps.input import Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH", "reverse_geocode(self, location=None): params = {\"key\": self.key} if location: params[\"location\"] = location requests_method =", "if suburb: params[\"suburb\"] = suburb if street: params[\"street\"] = street if house_number: params[\"house_number\"]", "type(inputs) is str: inputs = [inputs] if len(inputs) == 0: raise ValueError(\"Param locations", "params[\"country\"] = country if city: params[\"city\"] = city if suburb: params[\"suburb\"] = suburb", "= house_number if zip: params[\"zip\"] = zip if admin_level_1: params[\"admin_level_1\"] = admin_level_1 requests_method", "if city: params[\"city\"] = city if suburb: params[\"suburb\"] = suburb if street: params[\"street\"]", "return {k: v for k, v in d.items() if v is not None}", "= requests_method(url, params=params) body = response.json() api_status = body[\"status\"] if api_status == \"OK\"", "= location requests_method = self.session.get url = self.__get_services_url_reverse_geocode() response = requests_method(url, params=params) body", "body.get(\"results\", []) def reverse_geocode_batch(self, locations: list): inputs = locations if type(inputs) is str:", "if location: params[\"location\"] = location if country: params[\"country\"] = country if city: params[\"city\"]", "@staticmethod def __make_coords_dict_helper(line_of_coords): lat, lon = line_of_coords.split(',') return dict(lat=lat, lon=lon) def geocode(self, location=None,", "street if house_number: params[\"house_number\"] = house_number if zip: params[\"zip\"] = zip if admin_level_1:", "ValueError(\"Param locations has to be less than 10000.\") if ',' not in inputs[0]:", 
"default=lambda x: x.__dict__) post_data = list(json.loads(json_string)) post_data = list(map(self.__remove_nulls, post_data)) services_url = self.__get_services_url_geocode_batch()", "GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class", "params[\"city\"] = city if suburb: params[\"suburb\"] = suburb if street: params[\"street\"] = street", "def __geocode_batch_base(self, post_data, services_url): url = services_url params = {\"key\": self.key} post_body =", "== \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode_batch(self, locations: list): inputs = locations if", "continue break body = r.json() api_status = body[\"status\"] if api_status == \"OK\" or", "def __init__(self, key=None, region='eu', custom_url=None, version='v0'): if not key: raise ValueError(\"API key is", "len(inputs) == 0: raise ValueError(\"Param locations has to contain some items.\") if len(inputs)", "\"/{}/api/batch/reversegeocode\" class Client(object): def __init__(self, key=None, region='eu', custom_url=None, version='v0'): if not key: raise", "inputs_data = list(map(self.__to_inputs_data, inputs)) json_string = json.dumps(inputs_data, default=lambda x: x.__dict__) post_data = list(json.loads(json_string))", "while True: retry_after = r.headers.get('retry-after') if retry_after is not None: time.sleep(int(retry_after)) r =", "results_url = r.headers.get('location') r = requests.get(results_url) while True: retry_after = r.headers.get('retry-after') if retry_after", "self.__get_services_url_geocode() response = requests_method(url, params=params) body = response.json() api_status = body[\"status\"] if api_status", "self.session.get url = self.__get_services_url_geocode() response = requests_method(url, params=params) body = response.json() api_status =", "\"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = 
\"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class Client(object): def __init__(self, key=None, region='eu',", "= street if house_number: params[\"house_number\"] = house_number if zip: params[\"zip\"] = zip if", "def reverse_geocode(self, location=None): params = {\"key\": self.key} if location: params[\"location\"] = location requests_method", "key @staticmethod def __to_inputs_data(input): if type(input) is str: return Input(input) return input @staticmethod", "less than 10000.\") inputs_data = list(map(self.__to_inputs_data, inputs)) json_string = json.dumps(inputs_data, default=lambda x: x.__dict__)", "api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode(self, location=None): params = {\"key\": self.key}", "__to_inputs_data(input): if type(input) is str: return Input(input) return input @staticmethod def __remove_nulls(d): return", "len(inputs) >= 10000: raise ValueError(\"Param locations has to be less than 10000.\") inputs_data", "params = {\"key\": self.key} if location: params[\"location\"] = location requests_method = self.session.get url", "requests from sygicmaps.input import Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH =", "location if country: params[\"country\"] = country if city: params[\"city\"] = city if suburb:", "x.__dict__) post_data = list(json.loads(json_string)) post_data = list(map(self.__remove_nulls, post_data)) services_url = self.__get_services_url_geocode_batch() return self.__geocode_batch_base(post_data,", "self.custom_url = custom_url self.services_url = SERVICES_URL.format(region) self.version = version self.session = requests.Session() self.key", "{\"key\": self.key} if location: params[\"location\"] = location requests_method = self.session.get url = self.__get_services_url_reverse_geocode()", "headers={'Content-type': 'application/json'}) results_url = 
r.headers.get('location') r = requests.get(results_url) while True: retry_after = r.headers.get('retry-after')", "house_number if zip: params[\"zip\"] = zip if admin_level_1: params[\"admin_level_1\"] = admin_level_1 requests_method =", "\"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class Client(object): def __init__(self, key=None, region='eu', custom_url=None, version='v0'): if", "not key: raise ValueError(\"API key is not set.\") self.custom_url = custom_url self.services_url =", "r = requests.get(results_url) while True: retry_after = r.headers.get('retry-after') if retry_after is not None:", "\"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class Client(object): def", "return dict(lat=lat, lon=lon) def geocode(self, location=None, country=None, city=None, suburb=None, street=None, house_number=None, zip=None, admin_level_1=None):", "def __get_services_url_geocode(self): return self.services_url + GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def", "api_status == \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode(self, location=None):", "be less than 10000.\") inputs_data = list(map(self.__to_inputs_data, inputs)) json_string = json.dumps(inputs_data, default=lambda x:", "version self.session = requests.Session() self.key = key @staticmethod def __to_inputs_data(input): if type(input) is", ">= 10000: raise ValueError(\"Param locations has to be less than 10000.\") if ','", "inputs)) json_string = json.dumps(inputs_data, default=lambda x: x.__dict__) post_data = list(json.loads(json_string)) post_data = list(map(self.__remove_nulls,", "has to be less than 10000.\") inputs_data = list(map(self.__to_inputs_data, 
inputs)) json_string = json.dumps(inputs_data,", "url = services_url params = {\"key\": self.key} post_body = json.dumps(post_data) r = requests.post(url,", "has to contain some items.\") if len(inputs) >= 10000: raise ValueError(\"Param locations has", "list(map(self.__to_inputs_data, inputs)) json_string = json.dumps(inputs_data, default=lambda x: x.__dict__) post_data = list(json.loads(json_string)) post_data =", ">= 10000: raise ValueError(\"Param locations has to be less than 10000.\") inputs_data =", "requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'}) results_url = r.headers.get('location') r = requests.get(results_url) while True:", "country: params[\"country\"] = country if city: params[\"city\"] = city if suburb: params[\"suburb\"] =", "v in d.items() if v is not None} def __get_services_url_geocode(self): return self.services_url +", "= zip if admin_level_1: params[\"admin_level_1\"] = admin_level_1 requests_method = self.session.get url = self.__get_services_url_geocode()", "= self.__get_services_url_reverse_geocode() response = requests_method(url, params=params) body = response.json() api_status = body[\"status\"] if", "geocode(self, location=None, country=None, city=None, suburb=None, street=None, house_number=None, zip=None, admin_level_1=None): params = {\"key\": self.key}", "self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords): lat, lon = line_of_coords.split(',') return dict(lat=lat, lon=lon)", "than 10000.\") if ',' not in inputs[0]: raise ValueError(\"No comma delimiter found, please", "\"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\"", "if street: params[\"street\"] = street if house_number: params[\"house_number\"] = house_number 
if zip: params[\"zip\"]", "GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version)", "= \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class Client(object): def __init__(self, key=None, region='eu', custom_url=None, version='v0'):", "lon=lon) def geocode(self, location=None, country=None, city=None, suburb=None, street=None, house_number=None, zip=None, admin_level_1=None): params =", "zip if admin_level_1: params[\"admin_level_1\"] = admin_level_1 requests_method = self.session.get url = self.__get_services_url_geocode() response", "REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class Client(object): def __init__(self, key=None, region='eu', custom_url=None,", "inputs = [inputs] if len(inputs) == 0: raise ValueError(\"Param locations has to contain", "class Client(object): def __init__(self, key=None, region='eu', custom_url=None, version='v0'): if not key: raise ValueError(\"API", "street: params[\"street\"] = street if house_number: params[\"house_number\"] = house_number if zip: params[\"zip\"] =", "return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords): lat, lon = line_of_coords.split(',') return dict(lat=lat,", "post_body = json.dumps(post_data) r = requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'}) results_url = r.headers.get('location')", "== \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode_batch(self, locations: list):", "None: time.sleep(int(retry_after)) r = requests.get(results_url) continue break body = r.json() api_status = body[\"status\"]", 
"api_status == \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def __geocode_batch_base(self, post_data,", "= key @staticmethod def __to_inputs_data(input): if type(input) is str: return Input(input) return input", "some items.\") if len(inputs) >= 10000: raise ValueError(\"Param locations has to be less", "suburb: params[\"suburb\"] = suburb if street: params[\"street\"] = street if house_number: params[\"house_number\"] =", "retry_after = r.headers.get('retry-after') if retry_after is not None: time.sleep(int(retry_after)) r = requests.get(results_url) continue", "region='eu', custom_url=None, version='v0'): if not key: raise ValueError(\"API key is not set.\") self.custom_url", "k, v in d.items() if v is not None} def __get_services_url_geocode(self): return self.services_url", "api_status == \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode_batch(self, locations:", "custom_url=None, version='v0'): if not key: raise ValueError(\"API key is not set.\") self.custom_url =", "if not key: raise ValueError(\"API key is not set.\") self.custom_url = custom_url self.services_url", "@staticmethod def __remove_nulls(d): return {k: v for k, v in d.items() if v", "def geocode(self, location=None, country=None, city=None, suburb=None, street=None, house_number=None, zip=None, admin_level_1=None): params = {\"key\":", "is str: return Input(input) return input @staticmethod def __remove_nulls(d): return {k: v for", "city=None, suburb=None, street=None, house_number=None, zip=None, admin_level_1=None): params = {\"key\": self.key} if location: params[\"location\"]", "return body.get(\"results\", []) def reverse_geocode_batch(self, locations: list): inputs = locations if type(inputs) is", "country=None, city=None, suburb=None, street=None, house_number=None, zip=None, admin_level_1=None): params = {\"key\": self.key} if location:", "location requests_method = self.session.get url = 
self.__get_services_url_reverse_geocode() response = requests_method(url, params=params) body =", "if zip: params[\"zip\"] = zip if admin_level_1: params[\"admin_level_1\"] = admin_level_1 requests_method = self.session.get", "__make_coords_dict_helper(line_of_coords): lat, lon = line_of_coords.split(',') return dict(lat=lat, lon=lon) def geocode(self, location=None, country=None, city=None,", "set.\") self.custom_url = custom_url self.services_url = SERVICES_URL.format(region) self.version = version self.session = requests.Session()", "dict(lat=lat, lon=lon) def geocode(self, location=None, country=None, city=None, suburb=None, street=None, house_number=None, zip=None, admin_level_1=None): params", "r = requests.get(results_url) continue break body = r.json() api_status = body[\"status\"] if api_status", "locations has to contain some items.\") if len(inputs) >= 10000: raise ValueError(\"Param locations", "params = {\"key\": self.key} if location: params[\"location\"] = location if country: params[\"country\"] =", "raise ValueError(\"Param locations has to be less than 10000.\") if ',' not in", "{\"key\": self.key} post_body = json.dumps(post_data) r = requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'}) results_url", "services_url = self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url) def geocode_batch(self, locations: list): inputs = locations", "= \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class Client(object):", "if admin_level_1: params[\"admin_level_1\"] = admin_level_1 requests_method = self.session.get url = self.__get_services_url_geocode() response =", "key: raise ValueError(\"API key is not set.\") self.custom_url = custom_url self.services_url = SERVICES_URL.format(region)", "ValueError(\"API key is not set.\") 
self.custom_url = custom_url self.services_url = SERVICES_URL.format(region) self.version =", "= json.dumps(inputs_data, default=lambda x: x.__dict__) post_data = list(json.loads(json_string)) post_data = list(map(self.__remove_nulls, post_data)) services_url", "def __make_coords_dict_helper(line_of_coords): lat, lon = line_of_coords.split(',') return dict(lat=lat, lon=lon) def geocode(self, location=None, country=None,", "admin_level_1=None): params = {\"key\": self.key} if location: params[\"location\"] = location if country: params[\"country\"]", "10000.\") inputs_data = list(map(self.__to_inputs_data, inputs)) json_string = json.dumps(inputs_data, default=lambda x: x.__dict__) post_data =", "d.items() if v is not None} def __get_services_url_geocode(self): return self.services_url + GEOCODE_URL_PATH.format(self.version) def", "__get_services_url_reverse_geocode_batch(self): return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self):", "body.get(\"results\", []) def __geocode_batch_base(self, post_data, services_url): url = services_url params = {\"key\": self.key}", "<gh_stars>1-10 import json import time import requests from sygicmaps.input import Input SERVICES_URL =", "not set.\") self.custom_url = custom_url self.services_url = SERVICES_URL.format(region) self.version = version self.session =", "== \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode(self, location=None): params", "+ GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return self.services_url +", "[]) def reverse_geocode(self, location=None): params = {\"key\": self.key} if location: params[\"location\"] = location", 
"return self.__geocode_batch_base(post_data, services_url) def geocode_batch(self, locations: list): inputs = locations if type(inputs) is", "params[\"admin_level_1\"] = admin_level_1 requests_method = self.session.get url = self.__get_services_url_geocode() response = requests_method(url, params=params)", "body = r.json() api_status = body[\"status\"] if api_status == \"OK\" or api_status ==", "geocode_batch(self, locations: list): inputs = locations if type(inputs) is str: inputs = [inputs]", "= line_of_coords.split(',') return dict(lat=lat, lon=lon) def geocode(self, location=None, country=None, city=None, suburb=None, street=None, house_number=None,", "return self.services_url + GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return", "self.services_url + GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return self.services_url", "is not None: time.sleep(int(retry_after)) r = requests.get(results_url) continue break body = r.json() api_status", "if len(inputs) >= 10000: raise ValueError(\"Param locations has to be less than 10000.\")", "country if city: params[\"city\"] = city if suburb: params[\"suburb\"] = suburb if street:", "is not None} def __get_services_url_geocode(self): return self.services_url + GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return self.services_url", "key=None, region='eu', custom_url=None, version='v0'): if not key: raise ValueError(\"API key is not set.\")", "ValueError(\"Param locations has to be less than 10000.\") inputs_data = list(map(self.__to_inputs_data, inputs)) json_string", "[]) def reverse_geocode_batch(self, locations: list): inputs = locations if 
type(inputs) is str: inputs", "json_string = json.dumps(inputs_data, default=lambda x: x.__dict__) post_data = list(json.loads(json_string)) post_data = list(map(self.__remove_nulls, post_data))", "import json import time import requests from sygicmaps.input import Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\"", "REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version)", "= requests.get(results_url) continue break body = r.json() api_status = body[\"status\"] if api_status ==", "has to be less than 10000.\") if ',' not in inputs[0]: raise ValueError(\"No", "line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string = json.dumps(inputs) post_data = list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch() return", "locations has to be less than 10000.\") inputs_data = list(map(self.__to_inputs_data, inputs)) json_string =", "list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url) def geocode_batch(self, locations: list): inputs =", "= \"/{}/api/batch/reversegeocode\" class Client(object): def __init__(self, key=None, region='eu', custom_url=None, version='v0'): if not key:", "raise ValueError(\"Param locations has to be less than 10000.\") inputs_data = list(map(self.__to_inputs_data, inputs))", "to be less than 10000.\") inputs_data = list(map(self.__to_inputs_data, inputs)) json_string = json.dumps(inputs_data, default=lambda", "return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return", 
"ValueError(\"Param locations has to contain some items.\") if len(inputs) >= 10000: raise ValueError(\"Param", "input @staticmethod def __remove_nulls(d): return {k: v for k, v in d.items() if", "= list(map(self.__to_inputs_data, inputs)) json_string = json.dumps(inputs_data, default=lambda x: x.__dict__) post_data = list(json.loads(json_string)) post_data", "= city if suburb: params[\"suburb\"] = suburb if street: params[\"street\"] = street if", "= \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH =", "__remove_nulls(d): return {k: v for k, v in d.items() if v is not", "= body[\"status\"] if api_status == \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", [])", "= version self.session = requests.Session() self.key = key @staticmethod def __to_inputs_data(input): if type(input)", "suburb=None, street=None, house_number=None, zip=None, admin_level_1=None): params = {\"key\": self.key} if location: params[\"location\"] =", "time import requests from sygicmaps.input import Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\"", "= self.__get_services_url_geocode() response = requests_method(url, params=params) body = response.json() api_status = body[\"status\"] if", "= self.session.get url = self.__get_services_url_reverse_geocode() response = requests_method(url, params=params) body = response.json() api_status", "= location if country: params[\"country\"] = country if city: params[\"city\"] = city if", "return Input(input) return input @staticmethod def __remove_nulls(d): return {k: v for k, v", "body.get(\"results\", []) def reverse_geocode(self, location=None): params = {\"key\": self.key} if location: params[\"location\"] =", "if type(inputs) is str: inputs = [inputs] if len(inputs) == 0: raise ValueError(\"Param", "= {\"key\": self.key} 
if location: params[\"location\"] = location if country: params[\"country\"] = country", "__get_services_url_geocode(self): return self.services_url + GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self):", "verify that location input format is list of LAT,LON\") inputs = list(map(lambda line_of_coords:", "str: inputs = [inputs] if len(inputs) == 0: raise ValueError(\"Param locations has to", "return input @staticmethod def __remove_nulls(d): return {k: v for k, v in d.items()", "if ',' not in inputs[0]: raise ValueError(\"No comma delimiter found, please verify that", "comma delimiter found, please verify that location input format is list of LAT,LON\")", "is list of LAT,LON\") inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string = json.dumps(inputs)", "Client(object): def __init__(self, key=None, region='eu', custom_url=None, version='v0'): if not key: raise ValueError(\"API key", "location: params[\"location\"] = location if country: params[\"country\"] = country if city: params[\"city\"] =", "{\"key\": self.key} if location: params[\"location\"] = location if country: params[\"country\"] = country if", "LAT,LON\") inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string = json.dumps(inputs) post_data = list(json.loads(json_string))", "SERVICES_URL.format(region) self.version = version self.session = requests.Session() self.key = key @staticmethod def __to_inputs_data(input):", "\"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode_batch(self, locations: list): inputs = locations if type(inputs)", "if location: params[\"location\"] = location requests_method = self.session.get url = self.__get_services_url_reverse_geocode() response =", "return body.get(\"results\", []) def 
__geocode_batch_base(self, post_data, services_url): url = services_url params = {\"key\":", "raise ValueError(\"API key is not set.\") self.custom_url = custom_url self.services_url = SERVICES_URL.format(region) self.version", "be less than 10000.\") if ',' not in inputs[0]: raise ValueError(\"No comma delimiter", "break body = r.json() api_status = body[\"status\"] if api_status == \"OK\" or api_status", "type(input) is str: return Input(input) return input @staticmethod def __remove_nulls(d): return {k: v", "or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def __geocode_batch_base(self, post_data, services_url): url =", "r.json() api_status = body[\"status\"] if api_status == \"OK\" or api_status == \"NO_RESULTS\": return", "format is list of LAT,LON\") inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string =", "delimiter found, please verify that location input format is list of LAT,LON\") inputs", "params=params, headers={'Content-type': 'application/json'}) results_url = r.headers.get('location') r = requests.get(results_url) while True: retry_after =", "location: params[\"location\"] = location requests_method = self.session.get url = self.__get_services_url_reverse_geocode() response = requests_method(url,", "inputs[0]: raise ValueError(\"No comma delimiter found, please verify that location input format is", "= list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url) def geocode_batch(self, locations: list): inputs", "to be less than 10000.\") if ',' not in inputs[0]: raise ValueError(\"No comma", "= locations if type(inputs) is str: inputs = [inputs] if len(inputs) == 0:", "ValueError(\"No comma delimiter found, please verify that location input format is list of", "'application/json'}) results_url = r.headers.get('location') r = requests.get(results_url) while True: retry_after = 
r.headers.get('retry-after') if", "None} def __get_services_url_geocode(self): return self.services_url + GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self): return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version)", "if api_status == \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode_batch(self,", "params[\"street\"] = street if house_number: params[\"house_number\"] = house_number if zip: params[\"zip\"] = zip", "time.sleep(int(retry_after)) r = requests.get(results_url) continue break body = r.json() api_status = body[\"status\"] if", "params[\"location\"] = location requests_method = self.session.get url = self.__get_services_url_reverse_geocode() response = requests_method(url, params=params)", "\"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode_batch(self, locations: list): inputs", "or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode_batch(self, locations: list): inputs =", "url = self.__get_services_url_reverse_geocode() response = requests_method(url, params=params) body = response.json() api_status = body[\"status\"]", "= r.headers.get('retry-after') if retry_after is not None: time.sleep(int(retry_after)) r = requests.get(results_url) continue break", "def __remove_nulls(d): return {k: v for k, v in d.items() if v is", "services_url): url = services_url params = {\"key\": self.key} post_body = json.dumps(post_data) r =", "services_url) def geocode_batch(self, locations: list): inputs = locations if type(inputs) is str: inputs", "line_of_coords.split(',') return dict(lat=lat, lon=lon) def geocode(self, location=None, country=None, city=None, suburb=None, street=None, house_number=None, zip=None,", "zip: params[\"zip\"] = zip if admin_level_1: params[\"admin_level_1\"] = admin_level_1 requests_method = self.session.get url", "= {\"key\": self.key} post_body = json.dumps(post_data) 
r = requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'})", "self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords): lat,", "if country: params[\"country\"] = country if city: params[\"city\"] = city if suburb: params[\"suburb\"]", "== \"NO_RESULTS\": return body.get(\"results\", []) def __geocode_batch_base(self, post_data, services_url): url = services_url params", "GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/reversegeocode\" class Client(object): def __init__(self,", "__init__(self, key=None, region='eu', custom_url=None, version='v0'): if not key: raise ValueError(\"API key is not", "if retry_after is not None: time.sleep(int(retry_after)) r = requests.get(results_url) continue break body =", "in d.items() if v is not None} def __get_services_url_geocode(self): return self.services_url + GEOCODE_URL_PATH.format(self.version)", "self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_geocode_batch(self): return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return self.services_url", "city: params[\"city\"] = city if suburb: params[\"suburb\"] = suburb if street: params[\"street\"] =", "is str: inputs = [inputs] if len(inputs) == 0: raise ValueError(\"Param locations has", "in inputs[0]: raise ValueError(\"No comma delimiter found, please verify that location input format", "= r.headers.get('location') r = requests.get(results_url) while True: retry_after = r.headers.get('retry-after') if retry_after is", "response.json() api_status = body[\"status\"] if api_status == \"OK\" or api_status == \"NO_RESULTS\": return", "lat, lon = 
line_of_coords.split(',') return dict(lat=lat, lon=lon) def geocode(self, location=None, country=None, city=None, suburb=None,", "post_data = list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url) def geocode_batch(self, locations: list):", "+ GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords): lat, lon", "found, please verify that location input format is list of LAT,LON\") inputs =", "location input format is list of LAT,LON\") inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs))", "params[\"location\"] = location if country: params[\"country\"] = country if city: params[\"city\"] = city", "json_string = json.dumps(inputs) post_data = list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url) def", "from sygicmaps.input import Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\"", "data=post_body, params=params, headers={'Content-type': 'application/json'}) results_url = r.headers.get('location') r = requests.get(results_url) while True: retry_after", "if api_status == \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def __geocode_batch_base(self,", "__geocode_batch_base(self, post_data, services_url): url = services_url params = {\"key\": self.key} post_body = json.dumps(post_data)", "suburb if street: params[\"street\"] = street if house_number: params[\"house_number\"] = house_number if zip:", "return body.get(\"results\", []) def reverse_geocode(self, location=None): params = {\"key\": self.key} if location: params[\"location\"]", "= 
services_url params = {\"key\": self.key} post_body = json.dumps(post_data) r = requests.post(url, data=post_body,", "params = {\"key\": self.key} post_body = json.dumps(post_data) r = requests.post(url, data=post_body, params=params, headers={'Content-type':", "body[\"status\"] if api_status == \"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def", "def __get_services_url_reverse_geocode(self): return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords): lat, lon = line_of_coords.split(',')", "list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs)) json_string = json.dumps(inputs) post_data = list(json.loads(json_string)) services_url = self.__get_services_url_reverse_geocode_batch()", "api_status == \"NO_RESULTS\": return body.get(\"results\", []) def __geocode_batch_base(self, post_data, services_url): url = services_url", "reverse_geocode_batch(self, locations: list): inputs = locations if type(inputs) is str: inputs = [inputs]", "self.key} if location: params[\"location\"] = location if country: params[\"country\"] = country if city:", "= SERVICES_URL.format(region) self.version = version self.session = requests.Session() self.key = key @staticmethod def", "admin_level_1: params[\"admin_level_1\"] = admin_level_1 requests_method = self.session.get url = self.__get_services_url_geocode() response = requests_method(url,", "for k, v in d.items() if v is not None} def __get_services_url_geocode(self): return", "retry_after is not None: time.sleep(int(retry_after)) r = requests.get(results_url) continue break body = r.json()", "than 10000.\") inputs_data = list(map(self.__to_inputs_data, inputs)) json_string = json.dumps(inputs_data, default=lambda x: x.__dict__) post_data", "= country if city: params[\"city\"] = city if suburb: params[\"suburb\"] = suburb if", "= response.json() api_status = body[\"status\"] if api_status == \"OK\" or 
api_status == \"NO_RESULTS\":", "self.session = requests.Session() self.key = key @staticmethod def __to_inputs_data(input): if type(input) is str:", "{k: v for k, v in d.items() if v is not None} def", "== \"NO_RESULTS\": return body.get(\"results\", []) def reverse_geocode(self, location=None): params = {\"key\": self.key} if", "if house_number: params[\"house_number\"] = house_number if zip: params[\"zip\"] = zip if admin_level_1: params[\"admin_level_1\"]", "house_number=None, zip=None, admin_level_1=None): params = {\"key\": self.key} if location: params[\"location\"] = location if", "10000.\") if ',' not in inputs[0]: raise ValueError(\"No comma delimiter found, please verify", "import time import requests from sygicmaps.input import Input SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH =", "\"OK\" or api_status == \"NO_RESULTS\": return body.get(\"results\", []) def __geocode_batch_base(self, post_data, services_url): url", "locations has to be less than 10000.\") if ',' not in inputs[0]: raise", "custom_url self.services_url = SERVICES_URL.format(region) self.version = version self.session = requests.Session() self.key = key", "self.__get_services_url_reverse_geocode_batch() return self.__geocode_batch_base(post_data, services_url) def geocode_batch(self, locations: list): inputs = locations if type(inputs)", "is not set.\") self.custom_url = custom_url self.services_url = SERVICES_URL.format(region) self.version = version self.session", "that location input format is list of LAT,LON\") inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords),", "SERVICES_URL = \"https://{}-geocoding.api.sygic.com\" GEOCODE_URL_PATH = \"/{}/api/geocode\" GEOCODE_BATCH_URL_PATH = \"/{}/api/batch/geocode\" REVERSE_GEOCODE_URL_PATH = \"/{}/api/reversegeocode\" REVERSE_GEOCODE_BATCH_URL_PATH", "= custom_url self.services_url = SERVICES_URL.format(region) self.version = version self.session = requests.Session() self.key =", 
"if v is not None} def __get_services_url_geocode(self): return self.services_url + GEOCODE_URL_PATH.format(self.version) def __get_services_url_reverse_geocode_batch(self):", "return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version) def __get_services_url_reverse_geocode(self): return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version) @staticmethod def __make_coords_dict_helper(line_of_coords):" ]
[ "authenticator(): return MockAuthenticator() @pytest.fixture def db(table) -> DBBackend: return DynamoDBBackend(table) @pytest.fixture def storage(bucket)", "TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\": \"S\"}, {\"AttributeName\": \"sk\", \"AttributeType\": \"S\"}, ], KeySchema=[ {\"AttributeName\":", "@pytest.fixture async def page(): from pyppeteer import launch browser = await launch({\"headless\": True})", "{ \"IndexName\": \"sk_gsi\", \"KeySchema\": [ {\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"},", "@pytest.fixture def table(): \"\"\"Pytest fixture that creates the table in the fake moto", "the fake moto AWS account \"\"\" with mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield", "in the fake moto AWS account \"\"\" with mock_s3(): s3 = boto3.resource(\"s3\", region_name=\"us-east-1\")", "import DynamoDBBackend from warehouse14.storage import S3Storage @pytest.fixture def bucket(): \"\"\"Pytest fixture that creates", "@pytest.fixture def storage(bucket) -> PackageStorage: return S3Storage(bucket) @pytest.fixture async def page(): from pyppeteer", "creates the table in the fake moto AWS account \"\"\" with mock_dynamodb2(): dynamodb", "GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\", \"KeySchema\": [ {\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\":", "return DynamoDBBackend(table) @pytest.fixture def storage(bucket) -> PackageStorage: return S3Storage(bucket) @pytest.fixture async def page():", "def db(table) -> DBBackend: return DynamoDBBackend(table) @pytest.fixture def storage(bucket) -> PackageStorage: return S3Storage(bucket)", "\"AttributeType\": \"S\"}, ], KeySchema=[ {\"AttributeName\": \"pk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"}, ],", "], ) @pytest.fixture def authenticator(): return MockAuthenticator() 
@pytest.fixture def db(table) -> DBBackend: return", "boto3 import pytest from moto import mock_dynamodb2, mock_s3 from tests.local_login import MockAuthenticator from", "\"sk\", \"AttributeType\": \"S\"}, ], KeySchema=[ {\"AttributeName\": \"pk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"},", "uuid4 import boto3 import pytest from moto import mock_dynamodb2, mock_s3 from tests.local_login import", "fake moto AWS account \"\"\" with mock_s3(): s3 = boto3.resource(\"s3\", region_name=\"us-east-1\") bucket =", "{\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"}, ], \"Projection\": { \"ProjectionType\": \"ALL\", }, } ], )", "\"S\"}, {\"AttributeName\": \"sk\", \"AttributeType\": \"S\"}, ], KeySchema=[ {\"AttributeName\": \"pk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\",", "storage(bucket) -> PackageStorage: return S3Storage(bucket) @pytest.fixture async def page(): from pyppeteer import launch", "region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4())) bucket.create() yield bucket @pytest.fixture def table(): \"\"\"Pytest fixture that", "dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\": \"S\"}, {\"AttributeName\":", "from uuid import uuid4 import boto3 import pytest from moto import mock_dynamodb2, mock_s3", "= boto3.resource(\"s3\", region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4())) bucket.create() yield bucket @pytest.fixture def table(): \"\"\"Pytest", "boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\": \"S\"}, {\"AttributeName\": \"sk\", \"AttributeType\":", "DBBackend, PackageStorage from warehouse14.repos_dynamo import DynamoDBBackend from warehouse14.storage import S3Storage @pytest.fixture def bucket():", "\"pk\", \"AttributeType\": \"S\"}, 
{\"AttributeName\": \"sk\", \"AttributeType\": \"S\"}, ], KeySchema=[ {\"AttributeName\": \"pk\", \"KeyType\": \"HASH\"},", "in the fake moto AWS account \"\"\" with mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\")", "DynamoDBBackend(table) @pytest.fixture def storage(bucket) -> PackageStorage: return S3Storage(bucket) @pytest.fixture async def page(): from", "with mock_s3(): s3 = boto3.resource(\"s3\", region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4())) bucket.create() yield bucket @pytest.fixture", "with mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\":", "import DBBackend, PackageStorage from warehouse14.repos_dynamo import DynamoDBBackend from warehouse14.storage import S3Storage @pytest.fixture def", "@pytest.fixture def authenticator(): return MockAuthenticator() @pytest.fixture def db(table) -> DBBackend: return DynamoDBBackend(table) @pytest.fixture", "account \"\"\" with mock_s3(): s3 = boto3.resource(\"s3\", region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4())) bucket.create() yield", "table(): \"\"\"Pytest fixture that creates the table in the fake moto AWS account", "dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\": \"S\"}, {\"AttributeName\": \"sk\", \"AttributeType\": \"S\"}, ], KeySchema=[", "DBBackend: return DynamoDBBackend(table) @pytest.fixture def storage(bucket) -> PackageStorage: return S3Storage(bucket) @pytest.fixture async def", "\"ProjectionType\": \"ALL\", }, } ], ) @pytest.fixture def authenticator(): return MockAuthenticator() @pytest.fixture def", "import mock_dynamodb2, mock_s3 from tests.local_login import MockAuthenticator from warehouse14 import DBBackend, PackageStorage from", "fixture that creates the table in the fake moto AWS account \"\"\" with", "\"\"\" 
with mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\",", "s3.Bucket(str(uuid4())) bucket.create() yield bucket @pytest.fixture def table(): \"\"\"Pytest fixture that creates the table", "\"pk\", \"KeyType\": \"RANGE\"}, ], \"Projection\": { \"ProjectionType\": \"ALL\", }, } ], ) @pytest.fixture", "\"\"\" with mock_s3(): s3 = boto3.resource(\"s3\", region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4())) bucket.create() yield bucket", "import uuid4 import boto3 import pytest from moto import mock_dynamodb2, mock_s3 from tests.local_login", "= boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\": \"S\"}, {\"AttributeName\": \"sk\",", "uuid import uuid4 import boto3 import pytest from moto import mock_dynamodb2, mock_s3 from", "{\"AttributeName\": \"pk\", \"AttributeType\": \"S\"}, {\"AttributeName\": \"sk\", \"AttributeType\": \"S\"}, ], KeySchema=[ {\"AttributeName\": \"pk\", \"KeyType\":", "@pytest.fixture def db(table) -> DBBackend: return DynamoDBBackend(table) @pytest.fixture def storage(bucket) -> PackageStorage: return", "\"IndexName\": \"sk_gsi\", \"KeySchema\": [ {\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"}, ],", "the fake moto AWS account \"\"\" with mock_s3(): s3 = boto3.resource(\"s3\", region_name=\"us-east-1\") bucket", "\"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\", \"KeySchema\":", "AWS account \"\"\" with mock_s3(): s3 = boto3.resource(\"s3\", region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4())) bucket.create()", "AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\": \"S\"}, 
{\"AttributeName\": \"sk\", \"AttributeType\": \"S\"}, ], KeySchema=[ {\"AttributeName\": \"pk\",", "S3Storage(bucket) @pytest.fixture async def page(): from pyppeteer import launch browser = await launch({\"headless\":", "PackageStorage: return S3Storage(bucket) @pytest.fixture async def page(): from pyppeteer import launch browser =", "table in the fake moto AWS account \"\"\" with mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\",", "\"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\", \"KeySchema\": [ {\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"},", "\"KeyType\": \"RANGE\"}, ], \"Projection\": { \"ProjectionType\": \"ALL\", }, } ], ) @pytest.fixture def", "BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\", \"KeySchema\": [ {\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"pk\",", "PackageStorage from warehouse14.repos_dynamo import DynamoDBBackend from warehouse14.storage import S3Storage @pytest.fixture def bucket(): \"\"\"Pytest", "= s3.Bucket(str(uuid4())) bucket.create() yield bucket @pytest.fixture def table(): \"\"\"Pytest fixture that creates the", "\"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\", \"KeySchema\": [", "\"\"\"Pytest fixture that creates the table in the fake moto AWS account \"\"\"", "{ \"ProjectionType\": \"ALL\", }, } ], ) @pytest.fixture def authenticator(): return MockAuthenticator() @pytest.fixture", "account \"\"\" with mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\":", "def authenticator(): return MockAuthenticator() @pytest.fixture def db(table) -> DBBackend: return DynamoDBBackend(table) @pytest.fixture def", "@pytest.fixture def bucket(): \"\"\"Pytest fixture that creates the 
bucket in the fake moto", "import boto3 import pytest from moto import mock_dynamodb2, mock_s3 from tests.local_login import MockAuthenticator", "\"\"\"Pytest fixture that creates the bucket in the fake moto AWS account \"\"\"", "creates the bucket in the fake moto AWS account \"\"\" with mock_s3(): s3", "{\"AttributeName\": \"pk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\":", "KeySchema=[ {\"AttributeName\": \"pk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ {", "region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\": \"S\"}, {\"AttributeName\": \"sk\", \"AttributeType\": \"S\"},", "import S3Storage @pytest.fixture def bucket(): \"\"\"Pytest fixture that creates the bucket in the", "from moto import mock_dynamodb2, mock_s3 from tests.local_login import MockAuthenticator from warehouse14 import DBBackend,", "\"S\"}, ], KeySchema=[ {\"AttributeName\": \"pk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\",", "\"pk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\",", "\"sk\", \"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\", \"KeySchema\": [ {\"AttributeName\": \"sk\",", "\"Projection\": { \"ProjectionType\": \"ALL\", }, } ], ) @pytest.fixture def authenticator(): return MockAuthenticator()", "DynamoDBBackend from warehouse14.storage import S3Storage @pytest.fixture def bucket(): \"\"\"Pytest fixture that creates the", "{\"AttributeName\": \"sk\", \"AttributeType\": \"S\"}, ], KeySchema=[ {\"AttributeName\": \"pk\", 
\"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\":", "moto AWS account \"\"\" with mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()),", "import pytest from moto import mock_dynamodb2, mock_s3 from tests.local_login import MockAuthenticator from warehouse14", "from warehouse14.repos_dynamo import DynamoDBBackend from warehouse14.storage import S3Storage @pytest.fixture def bucket(): \"\"\"Pytest fixture", "} ], ) @pytest.fixture def authenticator(): return MockAuthenticator() @pytest.fixture def db(table) -> DBBackend:", "db(table) -> DBBackend: return DynamoDBBackend(table) @pytest.fixture def storage(bucket) -> PackageStorage: return S3Storage(bucket) @pytest.fixture", "boto3.resource(\"s3\", region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4())) bucket.create() yield bucket @pytest.fixture def table(): \"\"\"Pytest fixture", "from pyppeteer import launch browser = await launch({\"headless\": True}) yield (await browser.pages())[0] await", "moto import mock_dynamodb2, mock_s3 from tests.local_login import MockAuthenticator from warehouse14 import DBBackend, PackageStorage", "from warehouse14.storage import S3Storage @pytest.fixture def bucket(): \"\"\"Pytest fixture that creates the bucket", "def table(): \"\"\"Pytest fixture that creates the table in the fake moto AWS", "warehouse14.storage import S3Storage @pytest.fixture def bucket(): \"\"\"Pytest fixture that creates the bucket in", "page(): from pyppeteer import launch browser = await launch({\"headless\": True}) yield (await browser.pages())[0]", "s3 = boto3.resource(\"s3\", region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4())) bucket.create() yield bucket @pytest.fixture def table():", "], KeySchema=[ {\"AttributeName\": \"pk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[", "S3Storage @pytest.fixture def 
bucket(): \"\"\"Pytest fixture that creates the bucket in the fake", "], \"Projection\": { \"ProjectionType\": \"ALL\", }, } ], ) @pytest.fixture def authenticator(): return", "], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\", \"KeySchema\": [ {\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"}, {\"AttributeName\":", "bucket.create() yield bucket @pytest.fixture def table(): \"\"\"Pytest fixture that creates the table in", "def storage(bucket) -> PackageStorage: return S3Storage(bucket) @pytest.fixture async def page(): from pyppeteer import", "def bucket(): \"\"\"Pytest fixture that creates the bucket in the fake moto AWS", "}, } ], ) @pytest.fixture def authenticator(): return MockAuthenticator() @pytest.fixture def db(table) ->", "{\"AttributeName\": \"sk\", \"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\", \"KeySchema\": [ {\"AttributeName\":", "yield bucket @pytest.fixture def table(): \"\"\"Pytest fixture that creates the table in the", "that creates the table in the fake moto AWS account \"\"\" with mock_dynamodb2():", "\"KeySchema\": [ {\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"}, ], \"Projection\": {", "[ {\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"}, ], \"Projection\": { \"ProjectionType\":", "def page(): from pyppeteer import launch browser = await launch({\"headless\": True}) yield (await", "from warehouse14 import DBBackend, PackageStorage from warehouse14.repos_dynamo import DynamoDBBackend from warehouse14.storage import S3Storage", "moto AWS account \"\"\" with mock_s3(): s3 = boto3.resource(\"s3\", region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4()))", "bucket = s3.Bucket(str(uuid4())) bucket.create() yield bucket @pytest.fixture def table(): \"\"\"Pytest fixture that creates", "bucket in the fake moto AWS account \"\"\" 
with mock_s3(): s3 = boto3.resource(\"s3\",", "mock_s3 from tests.local_login import MockAuthenticator from warehouse14 import DBBackend, PackageStorage from warehouse14.repos_dynamo import", "from tests.local_login import MockAuthenticator from warehouse14 import DBBackend, PackageStorage from warehouse14.repos_dynamo import DynamoDBBackend", "\"RANGE\"}, ], \"Projection\": { \"ProjectionType\": \"ALL\", }, } ], ) @pytest.fixture def authenticator():", "mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\": \"S\"},", "that creates the bucket in the fake moto AWS account \"\"\" with mock_s3():", "\"ALL\", }, } ], ) @pytest.fixture def authenticator(): return MockAuthenticator() @pytest.fixture def db(table)", "tests.local_login import MockAuthenticator from warehouse14 import DBBackend, PackageStorage from warehouse14.repos_dynamo import DynamoDBBackend from", "-> DBBackend: return DynamoDBBackend(table) @pytest.fixture def storage(bucket) -> PackageStorage: return S3Storage(bucket) @pytest.fixture async", "return S3Storage(bucket) @pytest.fixture async def page(): from pyppeteer import launch browser = await", "mock_s3(): s3 = boto3.resource(\"s3\", region_name=\"us-east-1\") bucket = s3.Bucket(str(uuid4())) bucket.create() yield bucket @pytest.fixture def", ") @pytest.fixture def authenticator(): return MockAuthenticator() @pytest.fixture def db(table) -> DBBackend: return DynamoDBBackend(table)", "-> PackageStorage: return S3Storage(bucket) @pytest.fixture async def page(): from pyppeteer import launch browser", "fake moto AWS account \"\"\" with mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table(", "mock_dynamodb2, mock_s3 from tests.local_login import MockAuthenticator from warehouse14 import DBBackend, PackageStorage from warehouse14.repos_dynamo", 
"\"KeyType\": \"RANGE\"}, ], BillingMode=\"PAY_PER_REQUEST\", GlobalSecondaryIndexes=[ { \"IndexName\": \"sk_gsi\", \"KeySchema\": [ {\"AttributeName\": \"sk\", \"KeyType\":", "\"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"}, ], \"Projection\": { \"ProjectionType\": \"ALL\", }, } ],", "{\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"}, ], \"Projection\": { \"ProjectionType\": \"ALL\",", "import MockAuthenticator from warehouse14 import DBBackend, PackageStorage from warehouse14.repos_dynamo import DynamoDBBackend from warehouse14.storage", "\"KeyType\": \"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"}, ], \"Projection\": { \"ProjectionType\": \"ALL\", }, }", "return MockAuthenticator() @pytest.fixture def db(table) -> DBBackend: return DynamoDBBackend(table) @pytest.fixture def storage(bucket) ->", "pytest from moto import mock_dynamodb2, mock_s3 from tests.local_login import MockAuthenticator from warehouse14 import", "\"AttributeType\": \"S\"}, {\"AttributeName\": \"sk\", \"AttributeType\": \"S\"}, ], KeySchema=[ {\"AttributeName\": \"pk\", \"KeyType\": \"HASH\"}, {\"AttributeName\":", "pyppeteer import launch browser = await launch({\"headless\": True}) yield (await browser.pages())[0] await browser.close()", "async def page(): from pyppeteer import launch browser = await launch({\"headless\": True}) yield", "MockAuthenticator from warehouse14 import DBBackend, PackageStorage from warehouse14.repos_dynamo import DynamoDBBackend from warehouse14.storage import", "bucket(): \"\"\"Pytest fixture that creates the bucket in the fake moto AWS account", "bucket @pytest.fixture def table(): \"\"\"Pytest fixture that creates the table in the fake", "the bucket in the fake moto AWS account \"\"\" with mock_s3(): s3 =", "warehouse14.repos_dynamo import DynamoDBBackend from warehouse14.storage import S3Storage @pytest.fixture def bucket(): \"\"\"Pytest fixture that", "\"sk\", \"KeyType\": 
\"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"}, ], \"Projection\": { \"ProjectionType\": \"ALL\", },", "the table in the fake moto AWS account \"\"\" with mock_dynamodb2(): dynamodb =", "MockAuthenticator() @pytest.fixture def db(table) -> DBBackend: return DynamoDBBackend(table) @pytest.fixture def storage(bucket) -> PackageStorage:", "warehouse14 import DBBackend, PackageStorage from warehouse14.repos_dynamo import DynamoDBBackend from warehouse14.storage import S3Storage @pytest.fixture", "fixture that creates the bucket in the fake moto AWS account \"\"\" with", "AWS account \"\"\" with mock_dynamodb2(): dynamodb = boto3.resource(\"dynamodb\", region_name=\"us-east-1\") yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[", "yield dynamodb.create_table( TableName=str(uuid4()), AttributeDefinitions=[ {\"AttributeName\": \"pk\", \"AttributeType\": \"S\"}, {\"AttributeName\": \"sk\", \"AttributeType\": \"S\"}, ],", "\"sk_gsi\", \"KeySchema\": [ {\"AttributeName\": \"sk\", \"KeyType\": \"HASH\"}, {\"AttributeName\": \"pk\", \"KeyType\": \"RANGE\"}, ], \"Projection\":" ]
[ "Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model = Character class BookAdmin(EnhancedModelAdmin):", "admin.TabularInline): model = Character class BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,) filter_horizontal = ('themes',) admin.site.register(Author,", "EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model = Character class BookAdmin(EnhancedModelAdmin): inlines =", "enhanced_admin from .models import Author, Book, Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class", "Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model = Character class", "django.contrib import admin from .. import admin as enhanced_admin from .models import Author,", "import admin as enhanced_admin from .models import Author, Book, Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin,", "import admin from .. 
import admin as enhanced_admin from .models import Author, Book,", "Character class BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,) filter_horizontal = ('themes',) admin.site.register(Author, EnhancedModelAdmin) admin.site.register(Book, BookAdmin)", ".models import Author, Book, Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline):", "pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model = Character class BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,) filter_horizontal", "Author, Book, Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model =", "Book, Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model = Character", "as enhanced_admin from .models import Author, Book, Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass", "from django.contrib import admin from .. import admin as enhanced_admin from .models import", "= Character class BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,) filter_horizontal = ('themes',) admin.site.register(Author, EnhancedModelAdmin) admin.site.register(Book,", "admin from .. import admin as enhanced_admin from .models import Author, Book, Character,", "class BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,) filter_horizontal = ('themes',) admin.site.register(Author, EnhancedModelAdmin) admin.site.register(Book, BookAdmin) admin.site.register(Theme,", "from .. 
import admin as enhanced_admin from .models import Author, Book, Character, Theme", "admin as enhanced_admin from .models import Author, Book, Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin):", "BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,) filter_horizontal = ('themes',) admin.site.register(Author, EnhancedModelAdmin) admin.site.register(Book, BookAdmin) admin.site.register(Theme, EnhancedModelAdmin)", "from .models import Author, Book, Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin,", "class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model = Character class BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,) filter_horizontal =", "admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model = Character class BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,)", "class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model = Character class BookAdmin(EnhancedModelAdmin): inlines", ".. import admin as enhanced_admin from .models import Author, Book, Character, Theme class", "model = Character class BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,) filter_horizontal = ('themes',) admin.site.register(Author, EnhancedModelAdmin)", "import Author, Book, Character, Theme class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin, admin.ModelAdmin): pass class CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model", "CharacterInline(enhanced_admin.EnhancedAdminMixin, admin.TabularInline): model = Character class BookAdmin(EnhancedModelAdmin): inlines = (CharacterInline,) filter_horizontal = ('themes',)" ]
[ "0] divisor_list.insert(0, 2) for divisor in divisor_list: modulus = testnumber % divisor if", "index_limit: state = self.determinePrime(test_integer) if state is True: listofprimes.append(test_integer) else: pass test_integer +=", "divisor = testnumber return primestate def primeList(self, index_limit): listofprimes = [2] test_integer =", "if state is True: listofprimes.append(test_integer) else: pass test_integer += 1 index_j = len(listofprimes)", "1) divisor_list = [element for element in list1 if element % 2 !=", "state = self.determinePrime(test_integer) if state is True: listofprimes.append(test_integer) else: pass test_integer += 1", "False divisor = testnumber return primestate def primeList(self, index_limit): listofprimes = [2] test_integer", "while index_j < index_limit: state = self.determinePrime(test_integer) if state is True: listofprimes.append(test_integer) else:", "__init__(self): pass def determinePrime(self, testnumber): primestate = True list1 = range(3, int(math.sqrt(testnumber)) +", "element % 2 != 0] divisor_list.insert(0, 2) for divisor in divisor_list: modulus =", "in list1 if element % 2 != 0] divisor_list.insert(0, 2) for divisor in", "[2] test_integer = 3 index_j = 0 while index_j < index_limit: state =", "divisor in divisor_list: modulus = testnumber % divisor if modulus == 0: primestate", "import math class PrimeNumbers(object): def __init__(self): pass def determinePrime(self, testnumber): primestate = True", "is True: listofprimes.append(test_integer) else: pass test_integer += 1 index_j = len(listofprimes) return listofprimes", "determinePrime(self, testnumber): primestate = True list1 = range(3, int(math.sqrt(testnumber)) + 1) divisor_list =", "index_limit): listofprimes = [2] test_integer = 3 index_j = 0 while index_j <", "return primestate def primeList(self, index_limit): listofprimes = [2] test_integer = 3 index_j =", "0: primestate = False divisor = testnumber return primestate def primeList(self, index_limit): 
listofprimes", "index_j < index_limit: state = self.determinePrime(test_integer) if state is True: listofprimes.append(test_integer) else: pass", "PrimeNumbers(object): def __init__(self): pass def determinePrime(self, testnumber): primestate = True list1 = range(3,", "= False divisor = testnumber return primestate def primeList(self, index_limit): listofprimes = [2]", "self.determinePrime(test_integer) if state is True: listofprimes.append(test_integer) else: pass test_integer += 1 index_j =", "testnumber % divisor if modulus == 0: primestate = False divisor = testnumber", "== 0: primestate = False divisor = testnumber return primestate def primeList(self, index_limit):", "if modulus == 0: primestate = False divisor = testnumber return primestate def", "testnumber): primestate = True list1 = range(3, int(math.sqrt(testnumber)) + 1) divisor_list = [element", "2 != 0] divisor_list.insert(0, 2) for divisor in divisor_list: modulus = testnumber %", "modulus == 0: primestate = False divisor = testnumber return primestate def primeList(self,", "= 3 index_j = 0 while index_j < index_limit: state = self.determinePrime(test_integer) if", "list1 = range(3, int(math.sqrt(testnumber)) + 1) divisor_list = [element for element in list1", "[element for element in list1 if element % 2 != 0] divisor_list.insert(0, 2)", "primestate def primeList(self, index_limit): listofprimes = [2] test_integer = 3 index_j = 0", "= [element for element in list1 if element % 2 != 0] divisor_list.insert(0,", "divisor_list.insert(0, 2) for divisor in divisor_list: modulus = testnumber % divisor if modulus", "+ 1) divisor_list = [element for element in list1 if element % 2", "< index_limit: state = self.determinePrime(test_integer) if state is True: listofprimes.append(test_integer) else: pass test_integer", "list1 if element % 2 != 0] divisor_list.insert(0, 2) for divisor in divisor_list:", "def __init__(self): pass def determinePrime(self, testnumber): primestate = True list1 = range(3, 
int(math.sqrt(testnumber))", "= 0 while index_j < index_limit: state = self.determinePrime(test_integer) if state is True:", "int(math.sqrt(testnumber)) + 1) divisor_list = [element for element in list1 if element %", "test_integer = 3 index_j = 0 while index_j < index_limit: state = self.determinePrime(test_integer)", "!= 0] divisor_list.insert(0, 2) for divisor in divisor_list: modulus = testnumber % divisor", "element in list1 if element % 2 != 0] divisor_list.insert(0, 2) for divisor", "% divisor if modulus == 0: primestate = False divisor = testnumber return", "primeList(self, index_limit): listofprimes = [2] test_integer = 3 index_j = 0 while index_j", "= self.determinePrime(test_integer) if state is True: listofprimes.append(test_integer) else: pass test_integer += 1 index_j", "2) for divisor in divisor_list: modulus = testnumber % divisor if modulus ==", "= [2] test_integer = 3 index_j = 0 while index_j < index_limit: state", "index_j = 0 while index_j < index_limit: state = self.determinePrime(test_integer) if state is", "testnumber return primestate def primeList(self, index_limit): listofprimes = [2] test_integer = 3 index_j", "for element in list1 if element % 2 != 0] divisor_list.insert(0, 2) for", "for divisor in divisor_list: modulus = testnumber % divisor if modulus == 0:", "state is True: listofprimes.append(test_integer) else: pass test_integer += 1 index_j = len(listofprimes) return", "in divisor_list: modulus = testnumber % divisor if modulus == 0: primestate =", "def determinePrime(self, testnumber): primestate = True list1 = range(3, int(math.sqrt(testnumber)) + 1) divisor_list", "= range(3, int(math.sqrt(testnumber)) + 1) divisor_list = [element for element in list1 if", "3 index_j = 0 while index_j < index_limit: state = self.determinePrime(test_integer) if state", "math class PrimeNumbers(object): def __init__(self): pass def determinePrime(self, testnumber): primestate = True list1", "= testnumber % divisor if modulus == 0: primestate = 
False divisor =", "= True list1 = range(3, int(math.sqrt(testnumber)) + 1) divisor_list = [element for element", "modulus = testnumber % divisor if modulus == 0: primestate = False divisor", "class PrimeNumbers(object): def __init__(self): pass def determinePrime(self, testnumber): primestate = True list1 =", "primestate = True list1 = range(3, int(math.sqrt(testnumber)) + 1) divisor_list = [element for", "divisor if modulus == 0: primestate = False divisor = testnumber return primestate", "divisor_list: modulus = testnumber % divisor if modulus == 0: primestate = False", "divisor_list = [element for element in list1 if element % 2 != 0]", "0 while index_j < index_limit: state = self.determinePrime(test_integer) if state is True: listofprimes.append(test_integer)", "listofprimes = [2] test_integer = 3 index_j = 0 while index_j < index_limit:", "True list1 = range(3, int(math.sqrt(testnumber)) + 1) divisor_list = [element for element in", "range(3, int(math.sqrt(testnumber)) + 1) divisor_list = [element for element in list1 if element", "% 2 != 0] divisor_list.insert(0, 2) for divisor in divisor_list: modulus = testnumber", "<filename>eulerproject.py<gh_stars>0 import math class PrimeNumbers(object): def __init__(self): pass def determinePrime(self, testnumber): primestate =", "def primeList(self, index_limit): listofprimes = [2] test_integer = 3 index_j = 0 while", "pass def determinePrime(self, testnumber): primestate = True list1 = range(3, int(math.sqrt(testnumber)) + 1)", "if element % 2 != 0] divisor_list.insert(0, 2) for divisor in divisor_list: modulus", "= testnumber return primestate def primeList(self, index_limit): listofprimes = [2] test_integer = 3", "primestate = False divisor = testnumber return primestate def primeList(self, index_limit): listofprimes =" ]
[ "def zardoz_roll(self, ctx, *, args): try: roll = RollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode)", "and DM to member.') @fetch_guild_db @handle_http_exception async def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *,", "help='Reroll previous roll') @fetch_guild_db @handle_http_exception async def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]): if member", "LoggingMixin): def __init__(self, bot, db): self.bot = bot self.db = db super().__init__() @commands.command(name='z',", "SekretRollHandler, RollList, DiceDelta) from .utils import handle_http_exception class RollCommands(commands.Cog, LoggingMixin): def __init__(self, bot,", ".rolls import (RollHandler, QuietRollHandler, SekretRollHandler, RollList, DiceDelta) from .utils import handle_http_exception class RollCommands(commands.Cog,", "import (RollHandler, QuietRollHandler, SekretRollHandler, RollList, DiceDelta) from .utils import handle_http_exception class RollCommands(commands.Cog, LoggingMixin):", "@fetch_guild_db @handle_http_exception async def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]): if member is None: member", "args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.message.reply(f'You fucked", "roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a secret roll and DM to member.') @fetch_guild_db", "db): self.bot = bot self.db = db super().__init__() @commands.command(name='z', help='Evaluate a dice roll.')", "RollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll handling failed: {e}')", "try: roll = RollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll", "= QuietRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: 
self.log.error(f'Roll handling failed:", "ctx.variables, args, game_mode=ctx.game_mode, require_tag=True) except ValueError as e: self.log.error(f'Roll handling failed: {e}') await", "self.log.error(f'Roll handling failed: {e}') await ctx.author.send(f'You fucked up your roll, {ctx.author}. {e}') else:", "= ctx.author try: roll = SekretRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode, require_tag=True) except ValueError", "{ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous roll') @fetch_guild_db @handle_http_exception", "is None: member = ctx.author try: roll = SekretRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode,", "@handle_http_exception async def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *, args): if member is None:", "typing.Optional[discord.Member]): if member is None: member = ctx.author saved = await ctx.guild_db.get_last_user_roll(member.id) if", "await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a dice roll, quietly.') @fetch_guild_db @handle_http_exception async", "async def zardoz_quiet_roll(self, ctx, *, args): try: roll = QuietRollHandler(ctx, self.log, ctx.variables, args,", "roll = QuietRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll handling", "@commands.command(name='zq', help='Evaluate a dice roll, quietly.') @fetch_guild_db @handle_http_exception async def zardoz_quiet_roll(self, ctx, *,", "self.db = db super().__init__() @commands.command(name='z', help='Evaluate a dice roll.') @fetch_guild_db @handle_http_exception async def", "roll.') @fetch_guild_db @handle_http_exception async def zardoz_roll(self, ctx, *, args): try: roll = RollHandler(ctx,", "await member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous roll') 
@fetch_guild_db @handle_http_exception async def zroll_reroll(self, ctx, member:", "import handle_http_exception class RollCommands(commands.Cog, LoggingMixin): def __init__(self, bot, db): self.bot = bot self.db", "except ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.message.reply(f'You fucked up your", "import LoggingMixin from .rolls import (RollHandler, QuietRollHandler, SekretRollHandler, RollList, DiceDelta) from .utils import", "from .utils import handle_http_exception class RollCommands(commands.Cog, LoggingMixin): def __init__(self, bot, db): self.bot =", "ctx, member: typing.Optional[discord.Member]): if member is None: member = ctx.author saved = await", "roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a dice roll,", "*, args): try: roll = RollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as", "a dice roll, quietly.') @fetch_guild_db @handle_http_exception async def zardoz_quiet_roll(self, ctx, *, args): try:", "zardoz_roll(self, ctx, *, args): try: roll = RollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except", "= db super().__init__() @commands.command(name='z', help='Evaluate a dice roll.') @fetch_guild_db @handle_http_exception async def zardoz_roll(self,", "@commands.command(name='z', help='Evaluate a dice roll.') @fetch_guild_db @handle_http_exception async def zardoz_roll(self, ctx, *, args):", "typing.Optional[discord.Member], *, args): if member is None: member = ctx.author try: roll =", "if member is None: member = ctx.author saved = await ctx.guild_db.get_last_user_roll(member.id) if saved", "dice roll.') @fetch_guild_db @handle_http_exception async def zardoz_roll(self, ctx, *, args): try: roll =", "__init__(self, bot, db): self.bot = bot self.db = db super().__init__() @commands.command(name='z', help='Evaluate a", "{member}.') else: cmd = saved['roll'] 
roll = RollHandler(ctx, self.log, ctx.variables, cmd, game_mode=ctx.game_mode) await", "ctx, member: typing.Optional[discord.Member], *, args): if member is None: member = ctx.author try:", "await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a dice roll, quietly.') @fetch_guild_db @handle_http_exception async def zardoz_quiet_roll(self,", "{ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a secret roll and", "secret roll and DM to member.') @fetch_guild_db @handle_http_exception async def zardoz_secret_roll(self, ctx, member:", "failed: {e}') await ctx.author.send(f'You fucked up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db)", "roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a secret roll", "{e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a secret roll and DM", "handling failed: {e}') await ctx.author.send(f'You fucked up your roll, {ctx.author}. {e}') else: await", "async def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]): if member is None: member = ctx.author", "@handle_http_exception async def zardoz_roll(self, ctx, *, args): try: roll = RollHandler(ctx, self.log, ctx.variables,", "member: typing.Optional[discord.Member]): if member is None: member = ctx.author saved = await ctx.guild_db.get_last_user_roll(member.id)", "e: self.log.error(f'Roll handling failed: {e}') await ctx.message.reply(f'You fucked up your roll, {ctx.author}. 
{e}')", "ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a secret roll and DM to member.') @fetch_guild_db @handle_http_exception async", "if saved is None: await ctx.message.reply(f'Ope, no roll history for {member}.') else: cmd", "def __init__(self, bot, db): self.bot = bot self.db = db super().__init__() @commands.command(name='z', help='Evaluate", "{e}') await ctx.author.send(f'You fucked up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await", "DM to member.') @fetch_guild_db @handle_http_exception async def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *, args):", "DiceDelta) from .utils import handle_http_exception class RollCommands(commands.Cog, LoggingMixin): def __init__(self, bot, db): self.bot", "saved = await ctx.guild_db.get_last_user_roll(member.id) if saved is None: await ctx.message.reply(f'Ope, no roll history", "roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a dice roll, quietly.') @fetch_guild_db @handle_http_exception async def", "member is None: member = ctx.author saved = await ctx.guild_db.get_last_user_roll(member.id) if saved is", "member is None: member = ctx.author try: roll = SekretRollHandler(ctx, self.log, ctx.variables, args,", "def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]): if member is None: member = ctx.author saved", "roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous roll') @fetch_guild_db", "import fetch_guild_db from .logging import LoggingMixin from .rolls import (RollHandler, QuietRollHandler, SekretRollHandler, RollList,", "await ctx.message.reply(f'You fucked up your roll, {ctx.author}. 
{e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg())", "async def zardoz_roll(self, ctx, *, args): try: roll = RollHandler(ctx, self.log, ctx.variables, args,", "= RollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll handling failed:", "your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous roll')", "await roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous roll') @fetch_guild_db @handle_http_exception async def zroll_reroll(self,", "import discord from discord.ext import commands from .database import fetch_guild_db from .logging import", "else: cmd = saved['roll'] roll = RollHandler(ctx, self.log, ctx.variables, cmd, game_mode=ctx.game_mode) await roll.add_to_db(ctx.guild_db)", "RollCommands(commands.Cog, LoggingMixin): def __init__(self, bot, db): self.bot = bot self.db = db super().__init__()", ".database import fetch_guild_db from .logging import LoggingMixin from .rolls import (RollHandler, QuietRollHandler, SekretRollHandler,", "= ctx.author saved = await ctx.guild_db.get_last_user_roll(member.id) if saved is None: await ctx.message.reply(f'Ope, no", "zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *, args): if member is None: member = ctx.author", "history for {member}.') else: cmd = saved['roll'] roll = RollHandler(ctx, self.log, ctx.variables, cmd,", "else: await roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous roll') @fetch_guild_db @handle_http_exception async def", "def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *, args): if member is None: member =", "member: typing.Optional[discord.Member], *, args): if member is None: member = ctx.author try: roll", "else: await roll.add_to_db(ctx.guild_db) 
await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a secret roll and DM to", "help='Evaluate a dice roll, quietly.') @fetch_guild_db @handle_http_exception async def zardoz_quiet_roll(self, ctx, *, args):", "if member is None: member = ctx.author try: roll = SekretRollHandler(ctx, self.log, ctx.variables,", "self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll handling failed: {e}') await", "db super().__init__() @commands.command(name='z', help='Evaluate a dice roll.') @fetch_guild_db @handle_http_exception async def zardoz_roll(self, ctx,", "failed: {e}') await ctx.message.reply(f'You fucked up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db)", "*, args): if member is None: member = ctx.author try: roll = SekretRollHandler(ctx,", "import commands from .database import fetch_guild_db from .logging import LoggingMixin from .rolls import", "try: roll = SekretRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode, require_tag=True) except ValueError as e:", "game_mode=ctx.game_mode, require_tag=True) except ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.author.send(f'You fucked", "as e: self.log.error(f'Roll handling failed: {e}') await ctx.author.send(f'You fucked up your roll, {ctx.author}.", "saved['roll'] roll = RollHandler(ctx, self.log, ctx.variables, cmd, game_mode=ctx.game_mode) await roll.add_to_db(ctx.guild_db) await ctx.message.reply(f'Reroll {roll.msg()}')", "member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous roll') @fetch_guild_db @handle_http_exception async def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]):", "await ctx.guild_db.get_last_user_roll(member.id) if saved is None: await ctx.message.reply(f'Ope, no roll history for {member}.')", "= await ctx.guild_db.get_last_user_roll(member.id) if saved is None: await ctx.message.reply(f'Ope, no roll history for", "commands 
from .database import fetch_guild_db from .logging import LoggingMixin from .rolls import (RollHandler,", "roll = RollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll handling", "bot, db): self.bot = bot self.db = db super().__init__() @commands.command(name='z', help='Evaluate a dice", "from .logging import LoggingMixin from .rolls import (RollHandler, QuietRollHandler, SekretRollHandler, RollList, DiceDelta) from", "from .database import fetch_guild_db from .logging import LoggingMixin from .rolls import (RollHandler, QuietRollHandler,", "async def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *, args): if member is None: member", "args, game_mode=ctx.game_mode, require_tag=True) except ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.author.send(f'You", "cmd = saved['roll'] roll = RollHandler(ctx, self.log, ctx.variables, cmd, game_mode=ctx.game_mode) await roll.add_to_db(ctx.guild_db) await", "ctx.guild_db.get_last_user_roll(member.id) if saved is None: await ctx.message.reply(f'Ope, no roll history for {member}.') else:", "(RollHandler, QuietRollHandler, SekretRollHandler, RollList, DiceDelta) from .utils import handle_http_exception class RollCommands(commands.Cog, LoggingMixin): def", "= saved['roll'] roll = RollHandler(ctx, self.log, ctx.variables, cmd, game_mode=ctx.game_mode) await roll.add_to_db(ctx.guild_db) await ctx.message.reply(f'Reroll", "dice roll, quietly.') @fetch_guild_db @handle_http_exception async def zardoz_quiet_roll(self, ctx, *, args): try: roll", "def zardoz_quiet_roll(self, ctx, *, args): try: roll = QuietRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode)", "saved is None: await ctx.message.reply(f'Ope, no roll history for {member}.') else: cmd =", "@handle_http_exception async def zardoz_quiet_roll(self, ctx, *, args): try: roll = QuietRollHandler(ctx, self.log, ctx.variables,", ".logging import 
LoggingMixin from .rolls import (RollHandler, QuietRollHandler, SekretRollHandler, RollList, DiceDelta) from .utils", "zroll_reroll(self, ctx, member: typing.Optional[discord.Member]): if member is None: member = ctx.author saved =", "{ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a dice roll, quietly.')", "typing import discord from discord.ext import commands from .database import fetch_guild_db from .logging", "require_tag=True) except ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.author.send(f'You fucked up", "ctx.author saved = await ctx.guild_db.get_last_user_roll(member.id) if saved is None: await ctx.message.reply(f'Ope, no roll", "ctx, *, args): try: roll = QuietRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError", "no roll history for {member}.') else: cmd = saved['roll'] roll = RollHandler(ctx, self.log,", "previous roll') @fetch_guild_db @handle_http_exception async def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]): if member is", "a secret roll and DM to member.') @fetch_guild_db @handle_http_exception async def zardoz_secret_roll(self, ctx,", "zardoz_quiet_roll(self, ctx, *, args): try: roll = QuietRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except", "args): try: roll = RollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e:", "*, args): try: roll = QuietRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as", "handle_http_exception class RollCommands(commands.Cog, LoggingMixin): def __init__(self, bot, db): self.bot = bot self.db =", "fucked up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make", "ctx.message.reply(f'You fucked up your roll, {ctx.author}. 
{e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zs',", "SekretRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode, require_tag=True) except ValueError as e: self.log.error(f'Roll handling failed:", "fucked up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr', help='Reroll", "for {member}.') else: cmd = saved['roll'] roll = RollHandler(ctx, self.log, ctx.variables, cmd, game_mode=ctx.game_mode)", "await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a secret roll and DM to member.')", "await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a secret roll and DM to member.') @fetch_guild_db @handle_http_exception", "handling failed: {e}') await ctx.message.reply(f'You fucked up your roll, {ctx.author}. {e}') else: await", "@handle_http_exception async def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]): if member is None: member =", "@commands.command(name='zr', help='Reroll previous roll') @fetch_guild_db @handle_http_exception async def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]): if", "from .rolls import (RollHandler, QuietRollHandler, SekretRollHandler, RollList, DiceDelta) from .utils import handle_http_exception class", "None: await ctx.message.reply(f'Ope, no roll history for {member}.') else: cmd = saved['roll'] roll", "discord from discord.ext import commands from .database import fetch_guild_db from .logging import LoggingMixin", "ctx.author.send(f'You fucked up your roll, {ctx.author}. 
{e}') else: await roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr',", "is None: await ctx.message.reply(f'Ope, no roll history for {member}.') else: cmd = saved['roll']", "ctx, *, args): try: roll = RollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError", "your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a secret", "help='Make a secret roll and DM to member.') @fetch_guild_db @handle_http_exception async def zardoz_secret_roll(self,", "await ctx.message.reply(f'Ope, no roll history for {member}.') else: cmd = saved['roll'] roll =", "class RollCommands(commands.Cog, LoggingMixin): def __init__(self, bot, db): self.bot = bot self.db = db", "fucked up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate", "= bot self.db = db super().__init__() @commands.command(name='z', help='Evaluate a dice roll.') @fetch_guild_db @handle_http_exception", "import typing import discord from discord.ext import commands from .database import fetch_guild_db from", "is None: member = ctx.author saved = await ctx.guild_db.get_last_user_roll(member.id) if saved is None:", "game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.message.reply(f'You fucked up", "LoggingMixin from .rolls import (RollHandler, QuietRollHandler, SekretRollHandler, RollList, DiceDelta) from .utils import handle_http_exception", "roll, quietly.') @fetch_guild_db @handle_http_exception async def zardoz_quiet_roll(self, ctx, *, args): try: roll =", "roll = SekretRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode, require_tag=True) except ValueError as e: self.log.error(f'Roll", "self.log, ctx.variables, args, game_mode=ctx.game_mode, require_tag=True) except ValueError as e: 
self.log.error(f'Roll handling failed: {e}')", "member = ctx.author saved = await ctx.guild_db.get_last_user_roll(member.id) if saved is None: await ctx.message.reply(f'Ope,", "ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a dice roll, quietly.') @fetch_guild_db @handle_http_exception async def zardoz_quiet_roll(self, ctx,", "ctx.message.reply(f'Ope, no roll history for {member}.') else: cmd = saved['roll'] roll = RollHandler(ctx,", "from discord.ext import commands from .database import fetch_guild_db from .logging import LoggingMixin from", "help='Evaluate a dice roll.') @fetch_guild_db @handle_http_exception async def zardoz_roll(self, ctx, *, args): try:", "as e: self.log.error(f'Roll handling failed: {e}') await ctx.message.reply(f'You fucked up your roll, {ctx.author}.", "ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.message.reply(f'You fucked up your roll,", "args): try: roll = QuietRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e:", "{e}') else: await roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous roll') @fetch_guild_db @handle_http_exception async", "member.') @fetch_guild_db @handle_http_exception async def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *, args): if member", "member = ctx.author try: roll = SekretRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode, require_tag=True) except", "ctx.author try: roll = SekretRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode, require_tag=True) except ValueError as", "bot self.db = db super().__init__() @commands.command(name='z', help='Evaluate a dice roll.') @fetch_guild_db @handle_http_exception async", "super().__init__() @commands.command(name='z', help='Evaluate a dice roll.') @fetch_guild_db @handle_http_exception async def zardoz_roll(self, ctx, *,", "to member.') 
@fetch_guild_db @handle_http_exception async def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *, args): if", "= SekretRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode, require_tag=True) except ValueError as e: self.log.error(f'Roll handling", "self.bot = bot self.db = db super().__init__() @commands.command(name='z', help='Evaluate a dice roll.') @fetch_guild_db", "a dice roll.') @fetch_guild_db @handle_http_exception async def zardoz_roll(self, ctx, *, args): try: roll", "e: self.log.error(f'Roll handling failed: {e}') await ctx.author.send(f'You fucked up your roll, {ctx.author}. {e}')", "up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zs', help='Make a", "except ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.author.send(f'You fucked up your", "up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a", "{e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a dice roll, quietly.') @fetch_guild_db", "fetch_guild_db from .logging import LoggingMixin from .rolls import (RollHandler, QuietRollHandler, SekretRollHandler, RollList, DiceDelta)", "RollList, DiceDelta) from .utils import handle_http_exception class RollCommands(commands.Cog, LoggingMixin): def __init__(self, bot, db):", ".utils import handle_http_exception class RollCommands(commands.Cog, LoggingMixin): def __init__(self, bot, db): self.bot = bot", "@fetch_guild_db @handle_http_exception async def zardoz_quiet_roll(self, ctx, *, args): try: roll = QuietRollHandler(ctx, self.log,", "None: member = ctx.author try: roll = SekretRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode, require_tag=True)", "ctx.message.reply(f'You fucked up your roll, {ctx.author}. 
{e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq',", "roll history for {member}.') else: cmd = saved['roll'] roll = RollHandler(ctx, self.log, ctx.variables,", "@commands.command(name='zs', help='Make a secret roll and DM to member.') @fetch_guild_db @handle_http_exception async def", "ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.author.send(f'You fucked up your roll,", "QuietRollHandler, SekretRollHandler, RollList, DiceDelta) from .utils import handle_http_exception class RollCommands(commands.Cog, LoggingMixin): def __init__(self,", "quietly.') @fetch_guild_db @handle_http_exception async def zardoz_quiet_roll(self, ctx, *, args): try: roll = QuietRollHandler(ctx,", "self.log.error(f'Roll handling failed: {e}') await ctx.message.reply(f'You fucked up your roll, {ctx.author}. {e}') else:", "discord.ext import commands from .database import fetch_guild_db from .logging import LoggingMixin from .rolls", "roll and DM to member.') @fetch_guild_db @handle_http_exception async def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member],", "{e}') await ctx.message.reply(f'You fucked up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await", "await ctx.author.send(f'You fucked up your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await member.send(roll.msg())", "up your roll, {ctx.author}. 
{e}') else: await roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous", "roll') @fetch_guild_db @handle_http_exception async def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]): if member is None:", "QuietRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll handling failed: {e}')", "roll.add_to_db(ctx.guild_db) await member.send(roll.msg()) @commands.command(name='zr', help='Reroll previous roll') @fetch_guild_db @handle_http_exception async def zroll_reroll(self, ctx,", "args): if member is None: member = ctx.author try: roll = SekretRollHandler(ctx, self.log,", "None: member = ctx.author saved = await ctx.guild_db.get_last_user_roll(member.id) if saved is None: await", "else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a dice roll, quietly.') @fetch_guild_db @handle_http_exception", "@fetch_guild_db @handle_http_exception async def zardoz_roll(self, ctx, *, args): try: roll = RollHandler(ctx, self.log,", "try: roll = QuietRollHandler(ctx, self.log, ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll", "your roll, {ctx.author}. {e}') else: await roll.add_to_db(ctx.guild_db) await ctx.message.reply(roll.msg()) @commands.command(name='zq', help='Evaluate a dice", "ctx.variables, args, game_mode=ctx.game_mode) except ValueError as e: self.log.error(f'Roll handling failed: {e}') await ctx.message.reply(f'You", "@fetch_guild_db @handle_http_exception async def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *, args): if member is" ]
[ "extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native']) else: compile_args = {} libraries = []", "cmdclass= {'build_ext': build_ext}, ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True, ) # conda create -n", "# -*- coding: utf-8 -*- import os import sys import re from setuptools", "ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ] setup(name='speedyfx', version=version, description='', author='', cmdclass= {'build_ext': build_ext},", "= open(VERSIONFILE, \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M)", "sting inside the package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE, \"rt\").read() VSRE", "from setuptools import setup, find_packages, Extension from Cython.Distutils import build_ext #from distutils.core import", "VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE, \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE,", "import Cython.Compiler.Options Cython.Compiler.Options.annotate = True # a define the version sting inside the", "as np import Cython.Compiler.Options Cython.Compiler.Options.annotate = True # a define the version sting", "Extension from Cython.Distutils import build_ext #from distutils.core import setup #from distutils.extension import Extension", "<reponame>rth/py-speedyfx #!/usr/bin/python # -*- coding: utf-8 -*- import os import sys import re", "[ np.get_include() ] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ] setup(name='speedyfx', version=version, description='', author='',", "[] if os.name == 'posix': libraries.append('m') include_dirs= [ np.get_include() 
] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"],", "setup, find_packages, Extension from Cython.Distutils import build_ext #from distutils.core import setup #from distutils.extension", "import setup #from distutils.extension import Extension import numpy as np import Cython.Compiler.Options Cython.Compiler.Options.annotate", "ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True, ) # conda create -n speedyfx-nskl-env setuptools six", "the package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE, \"rt\").read() VSRE = r\"^__version__", "'-march=native', '-mtune=native']) else: compile_args = {} libraries = [] if os.name == 'posix':", "ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True, ) # conda create -n speedyfx-nskl-env setuptools six cython", "package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE, \"rt\").read() VSRE = r\"^__version__ =", "find_packages, Extension from Cython.Distutils import build_ext #from distutils.core import setup #from distutils.extension import", "import numpy as np import Cython.Compiler.Options Cython.Compiler.Options.annotate = True # a define the", ") # conda create -n speedyfx-nskl-env setuptools six cython scipy numpy pytest python=3.5", "import re from setuptools import setup, find_packages, Extension from Cython.Distutils import build_ext #from", "distutils.extension import Extension import numpy as np import Cython.Compiler.Options Cython.Compiler.Options.annotate = True #", "string in %s.\" % (VERSIONFILE,)) if sys.platform != 'win32': compile_args = dict( extra_compile_args=['-O2',", "mo: version = mo.group(1) else: raise RuntimeError(\"Unable to find version string in %s.\"", 
"import build_ext #from distutils.core import setup #from distutils.extension import Extension import numpy as", "coding: utf-8 -*- import os import sys import re from setuptools import setup,", "build_ext}, ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True, ) # conda create -n speedyfx-nskl-env setuptools", "description='', author='', cmdclass= {'build_ext': build_ext}, ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True, ) # conda", "setuptools six cython scipy numpy pytest python=3.5 # conda env remove -n speedyfx-nskl-env", "libraries.append('m') include_dirs= [ np.get_include() ] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ] setup(name='speedyfx', version=version,", "setup(name='speedyfx', version=version, description='', author='', cmdclass= {'build_ext': build_ext}, ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True, )", "re.M) if mo: version = mo.group(1) else: raise RuntimeError(\"Unable to find version string", "'-mtune=native']) else: compile_args = {} libraries = [] if os.name == 'posix': libraries.append('m')", "inside the package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE, \"rt\").read() VSRE =", "== 'posix': libraries.append('m') include_dirs= [ np.get_include() ] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ]", "np.get_include() ] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ] setup(name='speedyfx', version=version, description='', author='', cmdclass=", "# see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package 
VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE, \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"", "Extension import numpy as np import Cython.Compiler.Options Cython.Compiler.Options.annotate = True # a define", "if sys.platform != 'win32': compile_args = dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native'])", "r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: version = mo.group(1)", "-n speedyfx-nskl-env setuptools six cython scipy numpy pytest python=3.5 # conda env remove", "import os import sys import re from setuptools import setup, find_packages, Extension from", "{} libraries = [] if os.name == 'posix': libraries.append('m') include_dirs= [ np.get_include() ]", "conda create -n speedyfx-nskl-env setuptools six cython scipy numpy pytest python=3.5 # conda", "https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE, \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo =", "] setup(name='speedyfx', version=version, description='', author='', cmdclass= {'build_ext': build_ext}, ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True,", "= {} libraries = [] if os.name == 'posix': libraries.append('m') include_dirs= [ np.get_include()", "build_ext #from distutils.core import setup #from distutils.extension import Extension import numpy as np", "setup #from distutils.extension import Extension import numpy as np import Cython.Compiler.Options Cython.Compiler.Options.annotate =", "-*- coding: utf-8 -*- import os import sys import re from setuptools import", "author='', cmdclass= {'build_ext': build_ext}, ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True, ) # conda create", 
"include_package_data=True, ) # conda create -n speedyfx-nskl-env setuptools six cython scipy numpy pytest", "the version sting inside the package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE,", "= ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: version = mo.group(1) else:", "# a define the version sting inside the package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\"", "'posix': libraries.append('m') include_dirs= [ np.get_include() ] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ] setup(name='speedyfx',", "find version string in %s.\" % (VERSIONFILE,)) if sys.platform != 'win32': compile_args =", "!= 'win32': compile_args = dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native']) else: compile_args", "setuptools import setup, find_packages, Extension from Cython.Distutils import build_ext #from distutils.core import setup", "re from setuptools import setup, find_packages, Extension from Cython.Distutils import build_ext #from distutils.core", "= dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native']) else: compile_args = {} libraries", "import setup, find_packages, Extension from Cython.Distutils import build_ext #from distutils.core import setup #from", "True # a define the version sting inside the package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package", "] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ] setup(name='speedyfx', version=version, description='', author='', cmdclass= {'build_ext':", 
"Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ] setup(name='speedyfx', version=version, description='', author='', cmdclass= {'build_ext': build_ext}, ext_modules=", "VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: version", "include_dirs=include_dirs, packages=find_packages(), include_package_data=True, ) # conda create -n speedyfx-nskl-env setuptools six cython scipy", "raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,)) if sys.platform !=", "'win32': compile_args = dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native']) else: compile_args =", "version=version, description='', author='', cmdclass= {'build_ext': build_ext}, ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True, ) #", "os.name == 'posix': libraries.append('m') include_dirs= [ np.get_include() ] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args),", "utf-8 -*- import os import sys import re from setuptools import setup, find_packages,", "= [] if os.name == 'posix': libraries.append('m') include_dirs= [ np.get_include() ] ext_modules=[ Extension(\"speedyfx._hashing\",", "-*- import os import sys import re from setuptools import setup, find_packages, Extension", "(VERSIONFILE,)) if sys.platform != 'win32': compile_args = dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native',", "sys.platform != 'win32': compile_args = dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native']) else:", "libraries = [] if os.name == 'posix': libraries.append('m') include_dirs= [ np.get_include() ] ext_modules=[", "['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: 
version = mo.group(1) else: raise", "% (VERSIONFILE,)) if sys.platform != 'win32': compile_args = dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2',", "= re.search(VSRE, verstrline, re.M) if mo: version = mo.group(1) else: raise RuntimeError(\"Unable to", "define the version sting inside the package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline =", "'-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native']) else: compile_args = {} libraries = [] if os.name", "re.search(VSRE, verstrline, re.M) if mo: version = mo.group(1) else: raise RuntimeError(\"Unable to find", "open(VERSIONFILE, \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if", "distutils.core import setup #from distutils.extension import Extension import numpy as np import Cython.Compiler.Options", "mo.group(1) else: raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,)) if", "import sys import re from setuptools import setup, find_packages, Extension from Cython.Distutils import", "np import Cython.Compiler.Options Cython.Compiler.Options.annotate = True # a define the version sting inside", "include_dirs= [ np.get_include() ] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ] setup(name='speedyfx', version=version, description='',", "= r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: version =", "if os.name == 'posix': libraries.append('m') include_dirs= [ np.get_include() ] ext_modules=[ Extension(\"speedyfx._hashing\", [\"speedyfx/_hashing.pyx\"], libraries=libraries,", "Cython.Compiler.Options.annotate = True # a define the version sting inside the package #", "compile_args = {} libraries = [] if os.name == 'posix': libraries.append('m') include_dirs= [", 
"#from distutils.extension import Extension import numpy as np import Cython.Compiler.Options Cython.Compiler.Options.annotate = True", "{'build_ext': build_ext}, ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(), include_package_data=True, ) # conda create -n speedyfx-nskl-env", "verstrline = open(VERSIONFILE, \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline,", "else: compile_args = {} libraries = [] if os.name == 'posix': libraries.append('m') include_dirs=", "'-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native']) else: compile_args = {} libraries = [] if", "speedyfx-nskl-env setuptools six cython scipy numpy pytest python=3.5 # conda env remove -n", "Cython.Distutils import build_ext #from distutils.core import setup #from distutils.extension import Extension import numpy", "if mo: version = mo.group(1) else: raise RuntimeError(\"Unable to find version string in", "compile_args = dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native']) else: compile_args = {}", "numpy as np import Cython.Compiler.Options Cython.Compiler.Options.annotate = True # a define the version", "# conda create -n speedyfx-nskl-env setuptools six cython scipy numpy pytest python=3.5 #", "create -n speedyfx-nskl-env setuptools six cython scipy numpy pytest python=3.5 # conda env", "version = mo.group(1) else: raise RuntimeError(\"Unable to find version string in %s.\" %", "#!/usr/bin/python # -*- coding: utf-8 -*- import os import sys import re from", "from Cython.Distutils import build_ext #from distutils.core import setup #from distutils.extension import Extension import", "in %s.\" % (VERSIONFILE,)) if sys.platform != 'win32': compile_args = dict( extra_compile_args=['-O2', '-march=native',", "= mo.group(1) else: raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,))", 
"packages=find_packages(), include_package_data=True, ) # conda create -n speedyfx-nskl-env setuptools six cython scipy numpy", "%s.\" % (VERSIONFILE,)) if sys.platform != 'win32': compile_args = dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'],", "verstrline, re.M) if mo: version = mo.group(1) else: raise RuntimeError(\"Unable to find version", "see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE, \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo", "else: raise RuntimeError(\"Unable to find version string in %s.\" % (VERSIONFILE,)) if sys.platform", "Cython.Compiler.Options Cython.Compiler.Options.annotate = True # a define the version sting inside the package", "version string in %s.\" % (VERSIONFILE,)) if sys.platform != 'win32': compile_args = dict(", "import Extension import numpy as np import Cython.Compiler.Options Cython.Compiler.Options.annotate = True # a", "version sting inside the package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline = open(VERSIONFILE, \"rt\").read()", "a define the version sting inside the package # see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package VERSIONFILE=\"speedyfx/_version.py\" verstrline", "[\"speedyfx/_hashing.pyx\"], libraries=libraries, **compile_args), ] setup(name='speedyfx', version=version, description='', author='', cmdclass= {'build_ext': build_ext}, ext_modules= ext_modules,", "\"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo:", "os import sys import re from setuptools import setup, find_packages, Extension from Cython.Distutils", "#from distutils.core import setup #from distutils.extension import Extension import numpy as np import", "RuntimeError(\"Unable to 
find version string in %s.\" % (VERSIONFILE,)) if sys.platform != 'win32':", "= True # a define the version sting inside the package # see", "libraries=libraries, **compile_args), ] setup(name='speedyfx', version=version, description='', author='', cmdclass= {'build_ext': build_ext}, ext_modules= ext_modules, include_dirs=include_dirs,", "mo = re.search(VSRE, verstrline, re.M) if mo: version = mo.group(1) else: raise RuntimeError(\"Unable", "dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'], extra_link_args=['-O2', '-march=native', '-mtune=native']) else: compile_args = {} libraries =", "**compile_args), ] setup(name='speedyfx', version=version, description='', author='', cmdclass= {'build_ext': build_ext}, ext_modules= ext_modules, include_dirs=include_dirs, packages=find_packages(),", "sys import re from setuptools import setup, find_packages, Extension from Cython.Distutils import build_ext", "extra_link_args=['-O2', '-march=native', '-mtune=native']) else: compile_args = {} libraries = [] if os.name ==", "to find version string in %s.\" % (VERSIONFILE,)) if sys.platform != 'win32': compile_args" ]
[ "log): # The client might has problem to access Barreyele server, find the", "utils from pycoral import lustre_version from pycoral import ssh_host from pybarreleye import barrele_collectd", "= [%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 return", "!= \"x86_64\": log.cl_error(\"host [%s] has unsupported CPU type [%s]\", self.bea_host.sh_hostname, cpu_target) return -1", "Whether to collect disk metrics from this agent. self.bea_enable_disk = enable_disk # Whether", "-1 ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed to check the Lustre version", "Collectd\") return None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return collectd_config def bea_generate_configs(self, log, barreleye_instance): \"\"\"", "element\", json_string) return -1 serie = series[0] if \"columns\" not in serie: log.cl_debug(\"got", "-1 # If the hostname is inconsistent with the configured hostname, # fqdn", "0 def _bea_sanity_check(self, log): \"\"\" Sanity check of the host before installation \"\"\"", "Check the Lustre version according to the installed RPMs \"\"\" # pylint: disable=too-many-return-statements,too-many-branches", "host.sh_hostname) ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if ret: log.cl_error(\"failed to send test config", "host [%s], \" \"using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version return 0", "\"collectd\" host = self.bea_host ret = host.sh_service_start(log, service_name) if ret: log.cl_error(\"failed to start", "if ret: log.cl_error(\"failed to enable service [%s] on host [%s]\", service_name, host.sh_hostname) return", "check the Lustre version on Barreleye \" \"agent [%s]\", self.bea_host.sh_hostname) return -1 collectd_config", "[%s]\", command, 
self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 return 0 def _bea_sanity_check(self, log):", "this agent. self.bea_enable_lustre_oss = enable_lustre_oss # Whether to collect Lustre MDS metrics from", "\"\": tag_string += \" AND\" else: tag_string = \" WHERE\" tag_string += (\"", "log): \"\"\" Check whether the Collectd is running. Return 1 if running. Return", "self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb doesn't have expected data points from \" \"agent [%s]\",", "-1 def bea_collectd_stop(self, log): \"\"\" Stop Collectd service. \"\"\" service_name = \"collectd\" host", "self.bea_collectd_config_for_test else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production fpath += \".\" + host.sh_hostname", "return -1 log.cl_info(\"checking whether Influxdb can get data points from \" \"agent [%s]\",", "\"directory [%s] on host [%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1 return 0", "distro [%s]\", self.bea_host.sh_hostname, distro) return -1 cpu_target = self.bea_host.sh_target_cpu(log) if cpu_target is None:", "Collectd config for production \" \"usage\") return -1 self.bea_collectd_config_for_production = collectd_config # Check", "len(results) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [results] is not a \"", "self.bea_collectd_config_for_test = collectd_config collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if collectd_config is None: log.cl_error(\"failed", "lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version else: log.cl_info(\"detected Lustre version [%s] on host [%s]\", version.lv_name,", "on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed", "None # Collectd config for production. 
Type: CollectdConfig self.bea_collectd_config_for_production = None def _bea_check_connection_with_server(self,", "if len(results) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [results] is not a", "[%s] \" \"from agent [%s]\", measurement_name, self.bea_host.sh_hostname) return -1 return 0 def bea_collectd_send_config(self,", "service. \"\"\" service_name = \"collectd\" host = self.bea_host ret = host.sh_service_stop(log, service_name) if", "status [%d] with query [%s]\", response.status_code, query) return -1 data = response.json() json_string", "this agent. self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The last timestamp when a", "log.cl_error(\"failed to stop [%s] service on agent host [%s]\", service_name, host.sh_hostname) return -1", "\"\"\" host = self.bea_host fpath = barreleye_instance.bei_workspace + \"/\" if test_config: fpath +=", "serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [values] in one \" \"of the", "column in columns: if column == \"time\": time_index = i break i +=", "barreleye_instance, collectd_test=True) if collectd_config is None: log.cl_error(\"failed to generate Collectd config for test\")", "unexpected. 
hostname = retval.cr_stdout.strip() if hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s] of Barreleye", "not in data: log.cl_debug(\"got wrong InfluxDB data [%s], no [results]\", json_string) return -1", "\" \"array with only one element\", json_string) return -1 value = serie_values[0] time_index", "= self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye agent host [%s] is insane\", self.bea_host.sh_hostname) return -1", "Check whether influxdb has datapoint \"\"\" if \"fqdn\" not in tags: tags[\"fqdn\"] =", "host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_start(self, log): \"\"\" Start", "series\", json_string) return -1 serie_values = serie[\"values\"] if len(serie_values) != 1: log.cl_debug(\"got wrong", "Type: CollectdConfig self.bea_collectd_config_for_production = None def _bea_check_connection_with_server(self, log): # The client might has", "0 def _bea_influxdb_measurement_check(self, log, measurement_name, tags): # pylint: disable=bare-except,too-many-return-statements # pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\"", "self.bea_host.sh_hostname, distro) return -1 cpu_target = self.bea_host.sh_target_cpu(log) if cpu_target is None: log.cl_error(\"failed to", "Collectd service on host [%s]\", host.sh_hostname) return -1 log.cl_info(\"checking whether Influxdb can get", "log.cl_error(\"failed to send test config to Barreleye agent \" \"on host [%s]\", self.bea_host.sh_hostname)", "-1 command = (\"hostname\") retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run", "self.bea_enable_lustre_mds = enable_lustre_mds # Whether to collect Lustre client metrics from this agent.", "series\", json_string) return -1 columns = serie[\"columns\"] if \"values\" not in serie: log.cl_debug(\"got", "**tags): \"\"\" Check whether influxdb has datapoint \"\"\" if \"fqdn\" not in tags:", "= 
lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True) if version is None: log.cl_warning(\"failed to match Lustre", "to check the connection of Barreleye agent \" \"[%s] with server\", self.bea_host.sh_hostname) return", "= collectd_config collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if collectd_config is None: log.cl_error(\"failed to", "host.sh_rpm_version(log, \"collectd-\") if version is None: log.cl_error(\"failed to get the Collectd RPM version", "bea_collectd_send_config(self, log, barreleye_instance, test_config=False): \"\"\" Dump and send the collectd.conf to the agent", "self.bea_enable_lustre_client = enable_lustre_client # Whether to collect Infiniband metrics from this agent. self.bea_enable_infiniband", "enable service [%s] on host [%s]\", service_name, host.sh_hostname) return -1 return 0 def", "= [%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 #", "config for production \" \"usage\") return -1 self.bea_collectd_config_for_production = collectd_config # Check that", "[series] in one \" \"of the result\", json_string) return -1 series = result[\"series\"]", "return -1 self.bea_collectd_config_for_test = collectd_config collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if collectd_config is", "from \" \"agent [%s]\", host.sh_hostname) return -1 ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=False) if", "to generate Collectd config for test\") return -1 self.bea_collectd_config_for_test = collectd_config collectd_config =", "return -1 return 0 def _bea_sanity_check(self, log): \"\"\" Sanity check of the host", "of this type \"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host, barreleye_server, enable_disk=False, enable_lustre_oss=True,", "-1 if 
response.status_code != HTTPStatus.OK: log.cl_debug(\"got InfluxDB status [%d] with query [%s]\", response.status_code,", "self.bea_host ret = host.sh_service_stop(log, service_name) if ret: log.cl_error(\"failed to stop [%s] service on", "barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on", "for production \" \"usage\") return -1 self.bea_collectd_config_for_production = collectd_config # Check that needed", "log.cl_error(\"failed to get target cpu on host [%s]\", self.bea_host.sh_hostname) return -1 if cpu_target", "barrele_collectd.COLLECTD_INTERVAL_TEST else: interval = barreleye_instance.bei_collect_interval collectd_config = \\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss", "ret: log.cl_error(\"failed to stop [%s] service on agent host [%s]\", service_name, host.sh_hostname) return", "to enable service [%s] on host [%s]\", service_name, host.sh_hostname) return -1 return 0", "tag_string != \"\": tag_string += \" AND\" else: tag_string = \" WHERE\" tag_string", "command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 return 0 def _bea_sanity_check(self, log): \"\"\"", "0 log.cl_error(\"unexpected stdout of command [%s] on host [%s], \" \"ret = [%d],", "unsupported CPU type [%s]\", self.bea_host.sh_hostname, cpu_target) return -1 command = (\"hostname\") retval =", "result[\"series\"] if len(series) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [series] is not", "log.cl_error(\"inconsistent hostname [%s] of Barreleye agent \" \"host [%s]\", hostname, self.bea_host.sh_hostname) return -1", "0 def _bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False): \"\"\" Generate Collectd config \"\"\" if collectd_test:", "return 0 def bea_collectd_running(self, log): \"\"\" Check whether the Collectd is running. 
Return", "Each agent has an object of this type \"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes def", "has datapoint \"\"\" if \"fqdn\" not in tags: tags[\"fqdn\"] = self.bea_host.sh_hostname ret =", "retval = self.bea_host.sh_run(log, command) if retval.cr_stdout == \"active\\n\": return 1 if retval.cr_stdout ==", "return -1 timestamp = int(value[time_index]) if self.bea_influxdb_update_time is None: self.bea_influxdb_update_time = timestamp elif", "ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed to check the Lustre version on", "host.sh_service_stop(log, service_name) if ret: log.cl_error(\"failed to stop [%s] service on agent host [%s]\",", "version on this host. self.bea_lustre_version = None # Collectd RPMs needed to be", "self.bea_host.sh_hostname) self.bea_lustre_version = version return 0 def _bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False): \"\"\" Generate", "== \"unknown\\n\": return 0 if retval.cr_stdout == \"inactive\\n\": return 0 log.cl_error(\"unexpected stdout of", "service on agent host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_version(self,", "-1 return 0 def _bea_influxdb_measurement_check(self, log, measurement_name, tags): # pylint: disable=bare-except,too-many-return-statements # pylint:", "to send test config to Barreleye agent \" \"on host [%s]\", self.bea_host.sh_hostname) return", "to the installed RPMs \"\"\" # pylint: disable=too-many-return-statements,too-many-branches # Old Lustre kernel RPM", "epoch=\"s\") if response is None: log.cl_debug(\"failed to with query Influxdb with query [%s]\",", "= timestamp elif timestamp > self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp [%d] is not updated", "ret: log.cl_error(\"failed to send file [%s] on local host [%s] to \" \"directory", "[%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, 
retval.cr_stdout, retval.cr_stderr) return -1 rpm_names = retval.cr_stdout.split() rpm_fnames =", "return -1 series = result[\"series\"] if len(series) != 1: log.cl_debug(\"got wrong InfluxDB data", "= barrele_collectd.COLLECTD_INTERVAL_TEST else: interval = barreleye_instance.bei_collect_interval collectd_config = \\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if", "tag_string += \" AND\" else: tag_string = \" WHERE\" tag_string += (\" %s", "= results[0] if \"series\" not in result: log.cl_debug(\"got wrong InfluxDB data [%s], no", "points will be unexpected. hostname = retval.cr_stdout.strip() if hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname", "\"array with only one element\", json_string) return -1 result = results[0] if \"series\"", "_bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\" Check the Lustre version according to the installed RPMs", "log.cl_error(\"host [%s] has unsupported distro [%s]\", self.bea_host.sh_hostname, distro) return -1 cpu_target = self.bea_host.sh_target_cpu(log)", "server with thye of BarreleServer self.bea_barreleye_server = barreleye_server # Host to run commands.", "\"using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version return 0 if retval.cr_exit_status: log.cl_error(\"failed", "0 def bea_collectd_send_config(self, log, barreleye_instance, test_config=False): \"\"\" Dump and send the collectd.conf to", "agent host [%s] is insane\", self.bea_host.sh_hostname) return -1 ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if", "\" \"[%s] with server\", self.bea_host.sh_hostname) return -1 distro = self.bea_host.sh_distro(log) if distro not", "(\"hostname\") retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on", "-1 serie = series[0] if \"columns\" not in serie: 
log.cl_debug(\"got wrong InfluxDB data", "if ret: log.cl_error(\"Influxdb doesn't have expected data points from \" \"agent [%s]\", host.sh_hostname)", "(\" %s = '%s'\" % (key, value)) query = ('SELECT * FROM \"%s\"%s", "= self.bea_host ret = host.sh_service_start(log, service_name) if ret: log.cl_error(\"failed to start [%s] service", "= \"systemctl is-active collectd\" retval = self.bea_host.sh_run(log, command) if retval.cr_stdout == \"active\\n\": return", "retval.cr_stdout == \"unknown\\n\": return 0 if retval.cr_stdout == \"inactive\\n\": return 0 log.cl_error(\"unexpected stdout", "data point for measurement [%s] \" \"from agent [%s]\", measurement_name, self.bea_host.sh_hostname) return -1", "== \"\"): log.cl_info(\"Lustre RPM is not installed on host [%s], \" \"using default", "send the collectd.conf to the agent host \"\"\" host = self.bea_host fpath =", "LIMIT 1;' % (measurement_name, tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log, query, epoch=\"s\")", "[%s]\", host.sh_hostname) return -1 log.cl_info(\"checking whether Influxdb can get data points from \"", "\"\"\" Check whether influxdb has datapoint \"\"\" if \"fqdn\" not in tags: tags[\"fqdn\"]", "no data point for measurement [%s] \" \"from agent [%s]\", measurement_name, self.bea_host.sh_hostname) return", "host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_running(self, log): \"\"\" Check", "for test\") return -1 self.bea_collectd_config_for_test = collectd_config collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if", "InfluxDB \"\"\" tag_string = \"\" for key, value in tags.items(): if tag_string !=", "self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp [%d] is not updated with query [%s]\", timestamp, query)", "Lustre version on Barreleye \" \"agent [%s]\", self.bea_host.sh_hostname) return -1 collectd_config = 
self._bea_generate_collectd_config(log,", "on host [%s], \" \"ret = [%d], stdout = [%s], stderr = [%s]\",", "def __init__(self, host, barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): # Barreleye server with", "self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version else: log.cl_info(\"detected Lustre version [%s] on host [%s]\",", "http import HTTPStatus from pycoral import utils from pycoral import lustre_version from pycoral", "ret: log.cl_error(\"failed to restart Barreleye agent on host [%s]\", host.sh_hostname) return -1 ret", "Check whether the datapoint is recieved by InfluxDB \"\"\" tag_string = \"\" for", "if column == \"time\": time_index = i break i += 1 if time_index", "collectd_test=True) if collectd_config is None: log.cl_error(\"failed to generate Collectd config for test\") return", "\"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host, barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False):", "return 0 log.cl_debug(\"timestamp [%d] is not updated with query [%s]\", timestamp, query) return", "retval.cr_stderr) return -1 # If the hostname is inconsistent with the configured hostname,", "value in tags.items(): if tag_string != \"\": tag_string += \" AND\" else: tag_string", "collectd_test: interval = barrele_collectd.COLLECTD_INTERVAL_TEST else: interval = barreleye_instance.bei_collect_interval collectd_config = \\ barrele_collectd.CollectdConfig(self, interval,", "barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): # Barreleye server with thye of BarreleServer", "[%s] on host [%s], \" \"ret = [%d], stdout = [%s], stderr =", "ret = self.bea_collectd_send_config(log, barreleye_instance, 
test_config=False) if ret: log.cl_error(\"failed to send final Collectd config", "if ret: log.cl_error(\"Barreleye agent host [%s] is insane\", self.bea_host.sh_hostname) return -1 ret =", "if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret = collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client,", "etc_path) if ret: log.cl_error(\"failed to send file [%s] on local host [%s] to", "# problem as early as possible. barreleye_server = self.bea_barreleye_server command = (\"ping -c", "enable_infiniband=False): # Barreleye server with thye of BarreleServer self.bea_barreleye_server = barreleye_server # Host", "host.sh_send_file(log, fpath, etc_path) if ret: log.cl_error(\"failed to send file [%s] on local host", "columns: if column == \"time\": time_index = i break i += 1 if", "self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host [%s], \"", "in rpm_names: rpm_fnames.append(rpm_name + \".rpm\") version, _ = lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True) if", "according to RPM \" \"names on host [%s], using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name)", "metrics from this agent. 
self.bea_enable_lustre_oss = enable_lustre_oss # Whether to collect Lustre MDS", "timestamp, query) return -1 def bea_influxdb_measurement_check(self, log, measurement_name, **tags): \"\"\" Check whether influxdb", "_bea_check_connection_with_server(self, log): # The client might has problem to access Barreyele server, find", "-1 ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=False) if ret: log.cl_error(\"failed to send final Collectd", "\"array with only one element\", json_string) return -1 serie = series[0] if \"columns\"", "RPM \" \"names on host [%s], using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version =", "fqdn tag of the data points will be unexpected. hostname = retval.cr_stdout.strip() if", "i = 0 for column in columns: if column == \"time\": time_index =", "= lustre_fallback_version else: log.cl_info(\"detected Lustre version [%s] on host [%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version", "service on host [%s]\", host.sh_hostname) return -1 log.cl_info(\"checking whether Influxdb can get data", "Lustre. \"\"\" import json from http import HTTPStatus from pycoral import utils from", "with only one element\", json_string) return -1 result = results[0] if \"series\" not", "OSS metrics from this agent. 
self.bea_enable_lustre_oss = enable_lustre_oss # Whether to collect Lustre", "host = self.bea_host ret = host.sh_service_start(log, service_name) if ret: log.cl_error(\"failed to start [%s]", "value)) query = ('SELECT * FROM \"%s\"%s ORDER BY time DESC LIMIT 1;'", "ret = self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb doesn't have expected data points from \"", "unsupported distro [%s]\", self.bea_host.sh_hostname, distro) return -1 cpu_target = self.bea_host.sh_target_cpu(log) if cpu_target is", "\"agent on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_restart(log, service_name) if ret:", "version = host.sh_rpm_version(log, \"collectd-\") if version is None: log.cl_error(\"failed to get the Collectd", "collectd_config is None: log.cl_error(\"failed to generate Collectd config for test\") return -1 self.bea_collectd_config_for_test", "command) if (retval.cr_exit_status == 1 and retval.cr_stdout == \"\" and retval.cr_stderr == \"\"):", "enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed to config Lustre plugin of Collectd\") return None if", "only one element\", json_string) return -1 serie = series[0] if \"columns\" not in", "host [%s]\", host.sh_hostname) return -1 log.cl_info(\"checking whether Influxdb can get data points from", "class BarreleAgent(): \"\"\" Each agent has an object of this type \"\"\" #", "barreleye_server # Host to run commands. 
self.bea_host = host # Whether to collect", "self.bea_host.sh_hostname) return -1 distro = self.bea_host.sh_distro(log) if distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host", "stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 return 0 def", "agent host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_start(self, log): \"\"\"", "log.cl_error(\"Barreleye agent host [%s] is insane\", self.bea_host.sh_hostname) return -1 ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version)", "Collectd config for test\") return -1 self.bea_collectd_config_for_test = collectd_config collectd_config = self._bea_generate_collectd_config(log, barreleye_instance,", "\"\"\" ret = self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed to check the connection of Barreleye", "retval.cr_stdout, retval.cr_stderr) return -1 def bea_collectd_stop(self, log): \"\"\" Stop Collectd service. \"\"\" service_name", "= i break i += 1 if time_index == -1: log.cl_debug(\"got wrong InfluxDB", "that needed collectd RPMs are installed for rpm_type in self.bea_needed_collectd_rpm_types: if rpm_type not", "host # Whether to collect disk metrics from this agent. 
self.bea_enable_disk = enable_disk", "collectd RPMs are installed for rpm_type in self.bea_needed_collectd_rpm_types: if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict:", "+= \" AND\" else: tag_string = \" WHERE\" tag_string += (\" %s =", "might has problem to access Barreyele server, find the # problem as early", "Steps before configuring Barreleye agent \"\"\" ret = self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye agent", "# pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether the datapoint is recieved by InfluxDB \"\"\"", "is None: log.cl_error(\"failed to generate Collectd config for production \" \"usage\") return -1", "hostname, # fqdn tag of the data points will be unexpected. hostname =", "return -1 service_name = \"collectd\" ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to", "\" \"directory [%s] on host [%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1 return", "% (key, value)) query = ('SELECT * FROM \"%s\"%s ORDER BY time DESC", "restart Barreleye agent on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_enable(log, service_name)", "datapoint \"\"\" if \"fqdn\" not in tags: tags[\"fqdn\"] = self.bea_host.sh_hostname ret = utils.wait_condition(log,", "[%s]\", measurement_name, self.bea_host.sh_hostname) return -1 return 0 def bea_collectd_send_config(self, log, barreleye_instance, test_config=False): \"\"\"", "= version return 0 def _bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False): \"\"\" Generate Collectd config", "wrong InfluxDB data [%s], no [series] in one \" \"of the result\", json_string)", "\"\"\" Configure agent \"\"\" host = self.bea_host log.cl_info(\"configuring Collectd on host [%s]\", host.sh_hostname)", "cpu_target) return -1 command = (\"hostname\") retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed", "\" 
\"array with only one element\", json_string) return -1 serie = series[0] if", "from pycoral import utils from pycoral import lustre_version from pycoral import ssh_host from", "self.bea_enable_lustre_oss = enable_lustre_oss # Whether to collect Lustre MDS metrics from this agent.", "Lustre version on this host. self.bea_lustre_version = None # Collectd RPMs needed to", "ret = host.sh_send_file(log, fpath, etc_path) if ret: log.cl_error(\"failed to send file [%s] on", "[%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 # If the hostname is", "= self.bea_barreleye_server command = (\"ping -c 1 %s\" % barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log,", "Barreleye agent on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_enable(log, service_name) if", "self.bea_influxdb_update_time = timestamp elif timestamp > self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp [%d] is not", "\"\"\" Dump and send the collectd.conf to the agent host \"\"\" host =", "one \" \"of the series\", json_string) return -1 serie_values = serie[\"values\"] if len(serie_values)", "!= 1: log.cl_debug(\"got wrong InfluxDB data [%s], [results] is not a \" \"array", "Collectd config for test. Type: CollectdConfig self.bea_collectd_config_for_test = None # Collectd config for", "command = (\"rpm -qa | grep lustre | grep -v kernel\") retval =", "= self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if ret: log.cl_error(\"failed to send test config to Barreleye", "installation \"\"\" ret = self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed to check the connection of", "CollectdConfig self.bea_collectd_config_for_test = None # Collectd config for production. Type: CollectdConfig self.bea_collectd_config_for_production =", "self.bea_host = host # Whether to collect disk metrics from this agent. 
self.bea_enable_disk", "== \"inactive\\n\": return 0 log.cl_error(\"unexpected stdout of command [%s] on host [%s], \"", "<reponame>LiXi-storage/barreleye \"\"\" Library for Barreleye agent. Barreleye is a performance monitoring system for", "InfluxDB data [%s], no [series] in one \" \"of the result\", json_string) return", "self.bea_host.sh_distro(log) if distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s] has unsupported distro [%s]\",", "Barreleye agent. Barreleye is a performance monitoring system for Lustre. \"\"\" import json", "Lustre version according to the installed RPMs \"\"\" # pylint: disable=too-many-return-statements,too-many-branches # Old", "return collectd_config def bea_generate_configs(self, log, barreleye_instance): \"\"\" Steps before configuring Barreleye agent \"\"\"", "with only one element\", json_string) return -1 value = serie_values[0] time_index = -1", "ret = self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye agent host [%s] is insane\", self.bea_host.sh_hostname) return", "= serie[\"values\"] if len(serie_values) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [values] is", "self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 def bea_collectd_stop(self, log): \"\"\" Stop Collectd service.", "\"time\": time_index = i break i += 1 if time_index == -1: log.cl_debug(\"got", "bea_config_agent(self, log, barreleye_instance): \"\"\" Configure agent \"\"\" host = self.bea_host log.cl_info(\"configuring Collectd on", "config for test. Type: CollectdConfig self.bea_collectd_config_for_test = None # Collectd config for production.", "!= self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s] of Barreleye agent \" \"host [%s]\", hostname, self.bea_host.sh_hostname)", "for Lustre. \"\"\" import json from http import HTTPStatus from pycoral import utils", "needed to be installed in this agent. 
self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] #", "to restart Barreleye agent on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_enable(log,", "barreleye_instance.bei_workspace + \"/\" if test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test else: fpath", "[%s] has unsupported distro [%s]\", self.bea_host.sh_hostname, distro) return -1 cpu_target = self.bea_host.sh_target_cpu(log) if", "self.bea_lustre_version = lustre_fallback_version else: log.cl_info(\"detected Lustre version [%s] on host [%s]\", version.lv_name, self.bea_host.sh_hostname)", "[] for rpm_name in rpm_names: rpm_fnames.append(rpm_name + \".rpm\") version, _ = lustre_version.match_lustre_version_from_rpms(log, rpm_fnames,", "\"\"\" Steps before configuring Barreleye agent \"\"\" ret = self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye", "points from \" \"agent [%s]\", host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb doesn't", "\"\"\" Sanity check of the host before installation \"\"\" ret = self._bea_check_connection_with_server(log) if", "only one element\", json_string) return -1 result = results[0] if \"series\" not in", "on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_enable(log, service_name) if ret: log.cl_error(\"failed", "a performance monitoring system for Lustre. \"\"\" import json from http import HTTPStatus", "retval.cr_stderr) return -1 def bea_collectd_stop(self, log): \"\"\" Stop Collectd service. 
\"\"\" service_name =", "the Lustre version on Barreleye \" \"agent [%s]\", self.bea_host.sh_hostname) return -1 collectd_config =", "\" \"ret = [%d], stdout = [%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status,", "to send file [%s] on local host [%s] to \" \"directory [%s] on", "= \\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret =", "if ret: log.cl_error(\"failed to config Lustre plugin of Collectd\") return None if self.bea_enable_infiniband:", "agent \" \"on host [%s]\", self.bea_host.sh_hostname) return -1 service_name = \"collectd\" ret =", "timestamp > self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp [%d] is not updated with query [%s]\",", "= result[\"series\"] if len(series) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [series] is", "retval.cr_stdout, retval.cr_stderr) return -1 rpm_names = retval.cr_stdout.split() rpm_fnames = [] for rpm_name in", "data [%s], [values] is not a \" \"array with only one element\", json_string)", "\"agent [%s]\", host.sh_hostname) return -1 ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=False) if ret: log.cl_error(\"failed", "rpm_type, self.bea_host.sh_hostname) return -1 return 0 def _bea_influxdb_measurement_check(self, log, measurement_name, tags): # pylint:", "tag of the data points will be unexpected. 
hostname = retval.cr_stdout.strip() if hostname", "return -1 ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed to check the Lustre", "with query [%s]\", query) return -1 if response.status_code != HTTPStatus.OK: log.cl_debug(\"got InfluxDB status", "lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True) if version is None: log.cl_warning(\"failed to match Lustre version", "object of this type \"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host, barreleye_server, enable_disk=False,", "log.cl_info(\"Lustre RPM is not installed on host [%s], \" \"using default [%s]\", self.bea_host.sh_hostname,", "return -1 return 0 def bea_collectd_send_config(self, log, barreleye_instance, test_config=False): \"\"\" Dump and send", "else: log.cl_info(\"detected Lustre version [%s] on host [%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version = version", "has unsupported CPU type [%s]\", self.bea_host.sh_hostname, cpu_target) return -1 command = (\"hostname\") retval", "collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\" ret = host.sh_send_file(log, fpath, etc_path) if ret: log.cl_error(\"failed to", "fpath, etc_path) if ret: log.cl_error(\"failed to send file [%s] on local host [%s]", "def _bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False): \"\"\" Generate Collectd config \"\"\" if collectd_test: interval", "\"\"\" ret = self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye agent host [%s] is insane\", self.bea_host.sh_hostname)", "agent \"\"\" ret = self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye agent host [%s] is insane\",", "to Barreleye \" \"agent on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_restart(log,", "-1 columns = serie[\"columns\"] if \"values\" not in serie: log.cl_debug(\"got wrong InfluxDB data", "None: 
log.cl_error(\"failed to get the Collectd RPM version on host [%s]\", host.sh_hostname) return", "\".rpm\") version, _ = lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True) if version is None: log.cl_warning(\"failed", "RPMs needed to be installed in this agent. self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME]", "RPMs. command = (\"rpm -qa | grep lustre | grep -v kernel\") retval", "collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if collectd_config is None: log.cl_error(\"failed to generate Collectd", "# fqdn tag of the data points will be unexpected. hostname = retval.cr_stdout.strip()", "\"\"): log.cl_info(\"Lustre RPM is not installed on host [%s], \" \"using default [%s]\",", "time_index = i break i += 1 if time_index == -1: log.cl_debug(\"got wrong", "no [series] in one \" \"of the result\", json_string) return -1 series =", "version, _ = lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True) if version is None: log.cl_warning(\"failed to", "Influxdb can get data points from \" \"agent [%s]\", host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log)", "log): \"\"\" Sanity check of the host before installation \"\"\" ret = self._bea_check_connection_with_server(log)", "to \" \"directory [%s] on host [%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1", "host [%s] is insane\", self.bea_host.sh_hostname) return -1 ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret:", "= -1 i = 0 for column in columns: if column == \"time\":", "stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 # If the", "rpm_name in rpm_names: rpm_fnames.append(rpm_name + \".rpm\") version, _ = 
lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True)", "= host.sh_service_enable(log, service_name) if ret: log.cl_error(\"failed to enable service [%s] on host [%s]\",", "expected data points from \" \"agent [%s]\", host.sh_hostname) return -1 ret = self.bea_collectd_send_config(log,", "= None def _bea_check_connection_with_server(self, log): # The client might has problem to access", "Lustre kernel RPM might not be uninstalled ye, so ignore # kernel RPMs.", "= self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host [%s],", "self.bea_host.sh_hostname) return -1 if cpu_target != \"x86_64\": log.cl_error(\"host [%s] has unsupported CPU type", "if retval.cr_stdout == \"inactive\\n\": return 0 log.cl_error(\"unexpected stdout of command [%s] on host", "on agent host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_version(self, log):", "\" \"from agent [%s]\", measurement_name, self.bea_host.sh_hostname) return -1 return 0 def bea_collectd_send_config(self, log,", "(self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret = collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost,", "Collectd version, e.g. 5.12.0.barreleye0-1.el7.x86_64 \"\"\" host = self.bea_host version = host.sh_rpm_version(log, \"collectd-\") if", "self.bea_lustre_version = version return 0 def _bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False): \"\"\" Generate Collectd", "for production. Type: CollectdConfig self.bea_collectd_config_for_production = None def _bea_check_connection_with_server(self, log): # The client", "agent. 
self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The last timestamp when a measurement", "if ret: log.cl_error(\"failed to check the Lustre version on Barreleye \" \"agent [%s]\",", "self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s] of Barreleye agent \" \"host [%s]\", hostname, self.bea_host.sh_hostname) return", "kernel RPM might not be uninstalled ye, so ignore # kernel RPMs. command", "Collectd is running. Return 1 if running. Return -1 if failure. \"\"\" command", "grep lustre | grep -v kernel\") retval = self.bea_host.sh_run(log, command) if (retval.cr_exit_status ==", "\"\"\" Library for Barreleye agent. Barreleye is a performance monitoring system for Lustre.", "on host [%s], using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version else: log.cl_info(\"detected", "to the agent host \"\"\" host = self.bea_host fpath = barreleye_instance.bei_workspace + \"/\"", "fpath = barreleye_instance.bei_workspace + \"/\" if test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test", "to collect Lustre OSS metrics from this agent. 
self.bea_enable_lustre_oss = enable_lustre_oss # Whether", "for key, value in tags.items(): if tag_string != \"\": tag_string += \" AND\"", "inconsistent with the configured hostname, # fqdn tag of the data points will", "log.cl_error(\"failed to restart Barreleye agent on host [%s]\", host.sh_hostname) return -1 ret =", "points from \" \"agent [%s]\", host.sh_hostname) return -1 ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=False)", "in one \" \"of the result\", json_string) return -1 series = result[\"series\"] if", "\"systemctl is-active collectd\" retval = self.bea_host.sh_run(log, command) if retval.cr_stdout == \"active\\n\": return 1", "ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s] has unsupported distro [%s]\", self.bea_host.sh_hostname, distro) return -1 cpu_target =", "whether influxdb has datapoint \"\"\" if \"fqdn\" not in tags: tags[\"fqdn\"] = self.bea_host.sh_hostname", "rpm_names: rpm_fnames.append(rpm_name + \".rpm\") version, _ = lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True) if version", "= barreleye_server # Host to run commands. self.bea_host = host # Whether to", "retval.cr_stdout.strip() if hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s] of Barreleye agent \" \"host", "-1 if failure. \"\"\" command = \"systemctl is-active collectd\" retval = self.bea_host.sh_run(log, command)", "self.bea_collectd_config_for_test = None # Collectd config for production. 
Type: CollectdConfig self.bea_collectd_config_for_production = None", "to stop [%s] service on agent host [%s]\", service_name, host.sh_hostname) return -1 return", "InfluxDB status [%d] with query [%s]\", response.status_code, query) return -1 data = response.json()", "retval.cr_stderr == \"\"): log.cl_info(\"Lustre RPM is not installed on host [%s], \" \"using", "= response.json() json_string = json.dumps(data, indent=4, separators=(',', ': ')) log.cl_debug(\"data: [%s]\", json_string) if", "using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version else: log.cl_info(\"detected Lustre version [%s]", "on agent host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_start(self, log):", "FROM \"%s\"%s ORDER BY time DESC LIMIT 1;' % (measurement_name, tag_string)) influxdb_client =", "return 0 def bea_config_agent(self, log, barreleye_instance): \"\"\" Configure agent \"\"\" host = self.bea_host", "metrics from this agent. 
self.bea_enable_lustre_client = enable_lustre_client # Whether to collect Infiniband metrics", "grep -v kernel\") retval = self.bea_host.sh_run(log, command) if (retval.cr_exit_status == 1 and retval.cr_stdout", "service_name) if ret: log.cl_error(\"failed to restart Collectd service on host [%s]\", host.sh_hostname) return", "if collectd_config is None: log.cl_error(\"failed to generate Collectd config for test\") return -1", "!= HTTPStatus.OK: log.cl_debug(\"got InfluxDB status [%d] with query [%s]\", response.status_code, query) return -1", "barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1 return 0 def bea_config_agent(self, log, barreleye_instance): \"\"\" Configure agent", "return -1 ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=False) if ret: log.cl_error(\"failed to send final", "retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 def bea_collectd_stop(self, log): \"\"\" Stop Collectd service. \"\"\"", "this agent. self.bea_enable_disk = enable_disk # Whether to collect Lustre OSS metrics from", "been found to be updated. self.bea_influxdb_update_time = None # Collectd config for test.", "= retval.cr_stdout.split() rpm_fnames = [] for rpm_name in rpm_names: rpm_fnames.append(rpm_name + \".rpm\") version,", "= int(value[time_index]) if self.bea_influxdb_update_time is None: self.bea_influxdb_update_time = timestamp elif timestamp > self.bea_influxdb_update_time:", "# pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host, barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): #", "= enable_disk # Whether to collect Lustre OSS metrics from this agent. 
self.bea_enable_lustre_oss", "has an object of this type \"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host,", "\"fqdn\" not in tags: tags[\"fqdn\"] = self.bea_host.sh_hostname ret = utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags))", "log, barreleye_instance): \"\"\" Configure agent \"\"\" host = self.bea_host log.cl_info(\"configuring Collectd on host", "0 log.cl_debug(\"timestamp [%d] is not updated with query [%s]\", timestamp, query) return -1", "log.cl_error(\"failed to check the Lustre version on Barreleye \" \"agent [%s]\", self.bea_host.sh_hostname) return", "not a \" \"array with only one element\", json_string) return -1 value =", "influxdb_client = self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log, query, epoch=\"s\") if response is None: log.cl_debug(\"failed", "log.cl_error(\"failed to run command [%s] on host [%s], \" \"ret = [%d], stdout", "def bea_collectd_send_config(self, log, barreleye_instance, test_config=False): \"\"\" Dump and send the collectd.conf to the", "= json.dumps(data, indent=4, separators=(',', ': ')) log.cl_debug(\"data: [%s]\", json_string) if \"results\" not in", "host.sh_hostname) return -1 return 0 def bea_collectd_start(self, log): \"\"\" Start Collectd service. \"\"\"", "on host [%s]\", host.sh_hostname) ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if ret: log.cl_error(\"failed to", "stdout of command [%s] on host [%s], \" \"ret = [%d], stdout =", "self.bea_lustre_version = lustre_fallback_version return 0 if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on", "barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production fpath +=", "this agent. 
self.bea_enable_infiniband = enable_infiniband # Lustre version on this host. self.bea_lustre_version =", "serie_values[0] time_index = -1 i = 0 for column in columns: if column", "-qa | grep lustre | grep -v kernel\") retval = self.bea_host.sh_run(log, command) if", "\\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret = collectd_config.cdc_plugin_lustre(log,", "RPM [%s] of agent [%s] does not \" \"exist\", rpm_type, self.bea_host.sh_hostname) return -1", "agent \" \"host [%s]\", hostname, self.bea_host.sh_hostname) return -1 return 0 def _bea_check_lustre_version(self, log,", "def bea_config_agent(self, log, barreleye_instance): \"\"\" Configure agent \"\"\" host = self.bea_host log.cl_info(\"configuring Collectd", "= host.sh_service_start(log, service_name) if ret: log.cl_error(\"failed to start [%s] service on agent host", "1 if running. Return -1 if failure. \"\"\" command = \"systemctl is-active collectd\"", "= barreleye_instance.bei_workspace + \"/\" if test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test else:", "for Barreleye agent. Barreleye is a performance monitoring system for Lustre. 
\"\"\" import", "\" \"names on host [%s], using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version", "start [%s] service on agent host [%s]\", service_name, host.sh_hostname) return -1 return 0", "self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version return 0 if retval.cr_exit_status: log.cl_error(\"failed to run command", "int(value[time_index]) if self.bea_influxdb_update_time is None: self.bea_influxdb_update_time = timestamp elif timestamp > self.bea_influxdb_update_time: return", "run command [%s] on host [%s], \" \"ret = [%d], stdout = [%s],", "collectd_config collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if collectd_config is None: log.cl_error(\"failed to generate", "to run commands. self.bea_host = host # Whether to collect disk metrics from", "host = self.bea_host log.cl_info(\"configuring Collectd on host [%s]\", host.sh_hostname) ret = self.bea_collectd_send_config(log, barreleye_instance,", "[series] is not a \" \"array with only one element\", json_string) return -1", "get target cpu on host [%s]\", self.bea_host.sh_hostname) return -1 if cpu_target != \"x86_64\":", "disable=too-many-return-statements,too-many-branches # Old Lustre kernel RPM might not be uninstalled ye, so ignore", "has been found to be updated. 
self.bea_influxdb_update_time = None # Collectd config for", "whether the datapoint is recieved by InfluxDB \"\"\" tag_string = \"\" for key,", "__init__(self, host, barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): # Barreleye server with thye", "log.cl_debug(\"got wrong InfluxDB data [%s], no [results]\", json_string) return -1 results = data[\"results\"]", "log.cl_error(\"Influxdb doesn't have expected data points from \" \"agent [%s]\", host.sh_hostname) return -1", "(key, value)) query = ('SELECT * FROM \"%s\"%s ORDER BY time DESC LIMIT", "[%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 def bea_collectd_stop(self, log): \"\"\" Stop", "as early as possible. barreleye_server = self.bea_barreleye_server command = (\"ping -c 1 %s\"", "retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host [%s], \" \"ret = [%d],", "0 def bea_collectd_start(self, log): \"\"\" Start Collectd service. 
\"\"\" service_name = \"collectd\" host", "service_name = \"collectd\" host = self.bea_host ret = host.sh_service_stop(log, service_name) if ret: log.cl_error(\"failed", "is not installed on host [%s], \" \"using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version", "\" \"usage\") return -1 self.bea_collectd_config_for_production = collectd_config # Check that needed collectd RPMs", "host.sh_hostname) return -1 return 0 def bea_collectd_running(self, log): \"\"\" Check whether the Collectd", "[%s], no [values] in one \" \"of the series\", json_string) return -1 serie_values", "if ret: log.cl_error(\"failed to restart Barreleye agent on host [%s]\", host.sh_hostname) return -1", "[%s]\", self.bea_host.sh_hostname) return -1 collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if collectd_config is None:", "distro) return -1 cpu_target = self.bea_host.sh_target_cpu(log) if cpu_target is None: log.cl_error(\"failed to get", "host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_version(self, log): \"\"\" Return", "to collect Infiniband metrics from this agent. self.bea_enable_infiniband = enable_infiniband # Lustre version", "service_name, host.sh_hostname) return -1 return 0 def bea_collectd_running(self, log): \"\"\" Check whether the", "1: log.cl_debug(\"got wrong InfluxDB data [%s], [series] is not a \" \"array with", "hostname [%s] of Barreleye agent \" \"host [%s]\", hostname, self.bea_host.sh_hostname) return -1 return", "when a measurement has been found to be updated. 
self.bea_influxdb_update_time = None #", "enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed to config Lustre plugin of", "ret: log.cl_error(\"failed to enable service [%s] on host [%s]\", service_name, host.sh_hostname) return -1", "BarreleAgent(): \"\"\" Each agent has an object of this type \"\"\" # pylint:", "self.bea_host.sh_hostname) return -1 return 0 def _bea_influxdb_measurement_check(self, log, measurement_name, tags): # pylint: disable=bare-except,too-many-return-statements", "* FROM \"%s\"%s ORDER BY time DESC LIMIT 1;' % (measurement_name, tag_string)) influxdb_client", "json_string) return -1 series = result[\"series\"] if len(series) != 1: log.cl_debug(\"got wrong InfluxDB", "interval = barreleye_instance.bei_collect_interval collectd_config = \\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds", "host [%s], \" \"ret = [%d], stdout = [%s], stderr = [%s]\", command,", "[%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version else: log.cl_info(\"detected Lustre version [%s] on host", "to collect disk metrics from this agent. self.bea_enable_disk = enable_disk # Whether to", "tag_string = \"\" for key, value in tags.items(): if tag_string != \"\": tag_string", "collect Lustre MDS metrics from this agent. self.bea_enable_lustre_mds = enable_lustre_mds # Whether to", "running. Return 1 if running. Return -1 if failure. \"\"\" command = \"systemctl", "Whether to collect Infiniband metrics from this agent. 
self.bea_enable_infiniband = enable_infiniband # Lustre", "= \"collectd\" host = self.bea_host ret = host.sh_service_stop(log, service_name) if ret: log.cl_error(\"failed to", "(retval.cr_exit_status == 1 and retval.cr_stdout == \"\" and retval.cr_stderr == \"\"): log.cl_info(\"Lustre RPM", "# The client might has problem to access Barreyele server, find the #", "collectd\" retval = self.bea_host.sh_run(log, command) if retval.cr_stdout == \"active\\n\": return 1 if retval.cr_stdout", "if ret: log.cl_error(\"failed to start [%s] service on agent host [%s]\", service_name, host.sh_hostname)", "this agent. self.bea_enable_lustre_mds = enable_lustre_mds # Whether to collect Lustre client metrics from", "to generate Collectd config for production \" \"usage\") return -1 self.bea_collectd_config_for_production = collectd_config", "stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 def bea_collectd_stop(self, log):", "this agent. 
self.bea_enable_lustre_client = enable_lustre_client # Whether to collect Infiniband metrics from this", "ret: log.cl_error(\"failed to check the Lustre version on Barreleye \" \"agent [%s]\", self.bea_host.sh_hostname)", "self.bea_host.sh_hostname) return -1 collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if collectd_config is None: log.cl_error(\"failed", "| grep lustre | grep -v kernel\") retval = self.bea_host.sh_run(log, command) if (retval.cr_exit_status", "HTTPStatus from pycoral import utils from pycoral import lustre_version from pycoral import ssh_host", "if retval.cr_stdout == \"unknown\\n\": return 0 if retval.cr_stdout == \"inactive\\n\": return 0 log.cl_error(\"unexpected", "[results] is not a \" \"array with only one element\", json_string) return -1", "not a \" \"array with only one element\", json_string) return -1 serie =", "\"\"\" command = \"systemctl is-active collectd\" retval = self.bea_host.sh_run(log, command) if retval.cr_stdout ==", "send file [%s] on local host [%s] to \" \"directory [%s] on host", "are installed for rpm_type in self.bea_needed_collectd_rpm_types: if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd", "retval.cr_stdout == \"inactive\\n\": return 0 log.cl_error(\"unexpected stdout of command [%s] on host [%s],", "= \"\" for key, value in tags.items(): if tag_string != \"\": tag_string +=", "is running. Return 1 if running. Return -1 if failure. 
\"\"\" command =", "self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 rpm_names = retval.cr_stdout.split() rpm_fnames = [] for", "log.cl_debug(\"data: [%s]\", json_string) if \"results\" not in data: log.cl_debug(\"got wrong InfluxDB data [%s],", "Barreleye agent \" \"on host [%s]\", self.bea_host.sh_hostname) return -1 service_name = \"collectd\" ret", "[%s], no [columns] in one \" \"of the series\", json_string) return -1 columns", "\"unknown\\n\": return 0 if retval.cr_stdout == \"inactive\\n\": return 0 log.cl_error(\"unexpected stdout of command", "barreleye_instance, test_config=False): \"\"\" Dump and send the collectd.conf to the agent host \"\"\"", "\"exist\", rpm_type, self.bea_host.sh_hostname) return -1 return 0 def _bea_influxdb_measurement_check(self, log, measurement_name, tags): #", "log.cl_error(\"failed to enable service [%s] on host [%s]\", service_name, host.sh_hostname) return -1 return", "\\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The last timestamp when a measurement has been found", "agent host \"\"\" host = self.bea_host fpath = barreleye_instance.bei_workspace + \"/\" if test_config:", "cpu on host [%s]\", self.bea_host.sh_hostname) return -1 if cpu_target != \"x86_64\": log.cl_error(\"host [%s]", "be installed in this agent. self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The last", "lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version return 0 if retval.cr_exit_status: log.cl_error(\"failed to run command [%s]", "def bea_collectd_start(self, log): \"\"\" Start Collectd service. \"\"\" service_name = \"collectd\" host =", "host. 
self.bea_lustre_version = None # Collectd RPMs needed to be installed in this", "# The last timestamp when a measurement has been found to be updated.", "enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed to config Lustre plugin of Collectd\")", "# Host to run commands. self.bea_host = host # Whether to collect disk", "if response is None: log.cl_debug(\"failed to with query Influxdb with query [%s]\", query)", "with server\", self.bea_host.sh_hostname) return -1 distro = self.bea_host.sh_distro(log) if distro not in [ssh_host.DISTRO_RHEL7,", "serie[\"values\"] if len(serie_values) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [values] is not", "log, measurement_name, **tags): \"\"\" Check whether influxdb has datapoint \"\"\" if \"fqdn\" not", "return -1 distro = self.bea_host.sh_distro(log) if distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s]", "('SELECT * FROM \"%s\"%s ORDER BY time DESC LIMIT 1;' % (measurement_name, tag_string))", "data[\"results\"] if len(results) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [results] is not", "command = (\"hostname\") retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run command", "agent \"\"\" host = self.bea_host log.cl_info(\"configuring Collectd on host [%s]\", host.sh_hostname) ret =", "if ret: log.cl_error(\"failed to send test config to Barreleye agent \" \"on host", "host.sh_hostname) return -1 log.cl_info(\"checking whether Influxdb can get data points from \" \"agent", "json_string = json.dumps(data, indent=4, separators=(',', ': ')) log.cl_debug(\"data: [%s]\", json_string) if \"results\" not", "distro = self.bea_host.sh_distro(log) if distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: 
log.cl_error(\"host [%s] has unsupported", "return -1 def bea_collectd_stop(self, log): \"\"\" Stop Collectd service. \"\"\" service_name = \"collectd\"", "Return the Collectd version, e.g. 5.12.0.barreleye0-1.el7.x86_64 \"\"\" host = self.bea_host version = host.sh_rpm_version(log,", "in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [columns] in one \" \"of", "collectd_config = \\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret", "bea_generate_configs(self, log, barreleye_instance): \"\"\" Steps before configuring Barreleye agent \"\"\" ret = self._bea_sanity_check(log)", "the hostname is inconsistent with the configured hostname, # fqdn tag of the", "production \" \"usage\") return -1 self.bea_collectd_config_for_production = collectd_config # Check that needed collectd", "pycoral import utils from pycoral import lustre_version from pycoral import ssh_host from pybarreleye", "-1 return 0 def bea_collectd_start(self, log): \"\"\" Start Collectd service. 
\"\"\" service_name =", "to start [%s] service on agent host [%s]\", service_name, host.sh_hostname) return -1 return", "-1 self.bea_collectd_config_for_production = collectd_config # Check that needed collectd RPMs are installed for", "rpm_fnames, skip_kernel=True, skip_test=True) if version is None: log.cl_warning(\"failed to match Lustre version according", "config to Barreleye agent \" \"on host [%s]\", self.bea_host.sh_hostname) return -1 service_name =", "-1 results = data[\"results\"] if len(results) != 1: log.cl_debug(\"got wrong InfluxDB data [%s],", "InfluxDB data [%s], [values] is not a \" \"array with only one element\",", "if \"fqdn\" not in tags: tags[\"fqdn\"] = self.bea_host.sh_hostname ret = utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name,", "data [%s], [series] is not a \" \"array with only one element\", json_string)", "= self.bea_collectd_send_config(log, barreleye_instance, test_config=False) if ret: log.cl_error(\"failed to send final Collectd config to", "return -1 serie_values = serie[\"values\"] if len(serie_values) != 1: log.cl_debug(\"got wrong InfluxDB data", "(\"ping -c 1 %s\" % barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed", "= self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log, query, epoch=\"s\") if response is None: log.cl_debug(\"failed to", "the data points will be unexpected. 
hostname = retval.cr_stdout.strip() if hostname != self.bea_host.sh_hostname:", "rpm_type in self.bea_needed_collectd_rpm_types: if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM [%s] of", "Collectd config \"\"\" if collectd_test: interval = barrele_collectd.COLLECTD_INTERVAL_TEST else: interval = barreleye_instance.bei_collect_interval collectd_config", "= self.bea_host.sh_hostname ret = utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags)) if ret: log.cl_error(\"Influxdb gets no", "on host [%s], \" \"using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version return", "== \"\" and retval.cr_stderr == \"\"): log.cl_info(\"Lustre RPM is not installed on host", "agent \" \"[%s] with server\", self.bea_host.sh_hostname) return -1 distro = self.bea_host.sh_distro(log) if distro", "on host [%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1 return 0 def bea_config_agent(self,", "ret = self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed to check the connection of Barreleye agent", "tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log, query, epoch=\"s\") if response is None:", "cpu_target = self.bea_host.sh_target_cpu(log) if cpu_target is None: log.cl_error(\"failed to get target cpu on", "service on agent host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_start(self,", "retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 # If the hostname is inconsistent with the", "one element\", json_string) return -1 serie = series[0] if \"columns\" not in serie:", "tags): # pylint: disable=bare-except,too-many-return-statements # pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether the datapoint is", "+= 
\".\" + host.sh_hostname collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\" ret = host.sh_send_file(log, fpath, etc_path)", "log, barreleye_instance): \"\"\" Steps before configuring Barreleye agent \"\"\" ret = self._bea_sanity_check(log) if", "return 0 if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host [%s], \"", "is not updated with query [%s]\", timestamp, query) return -1 def bea_influxdb_measurement_check(self, log,", "[%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 rpm_names =", "= [%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 def", "CPU type [%s]\", self.bea_host.sh_hostname, cpu_target) return -1 command = (\"hostname\") retval = self.bea_host.sh_run(log,", "ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Barreleye agent on host", "# pylint: disable=bare-except,too-many-return-statements # pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether the datapoint is recieved", "log.cl_debug(\"timestamp [%d] is not updated with query [%s]\", timestamp, query) return -1 def", "no [columns] in one \" \"of the series\", json_string) return -1 columns =", "retval.cr_stderr) return -1 rpm_names = retval.cr_stdout.split() rpm_fnames = [] for rpm_name in rpm_names:", "return -1 ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Barreleye agent", "[%s], [values] is not a \" \"array with only one element\", json_string) return", "log, barreleye_instance, test_config=False): \"\"\" Dump and send the collectd.conf to the agent host", "self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, 
enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed to config Lustre plugin", "service [%s] on host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_running(self,", "local host [%s] to \" \"directory [%s] on host [%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname,", "to be installed in this agent. self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The", "# Old Lustre kernel RPM might not be uninstalled ye, so ignore #", "not be uninstalled ye, so ignore # kernel RPMs. command = (\"rpm -qa", "pycoral import ssh_host from pybarreleye import barrele_collectd class BarreleAgent(): \"\"\" Each agent has", "self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed to check the connection of Barreleye agent \" \"[%s]", "found to be updated. self.bea_influxdb_update_time = None # Collectd config for test. 
Type:", "= self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if collectd_config is None: log.cl_error(\"failed to generate Collectd config", "collectd_config def bea_generate_configs(self, log, barreleye_instance): \"\"\" Steps before configuring Barreleye agent \"\"\" ret", "self.bea_host log.cl_info(\"configuring Collectd on host [%s]\", host.sh_hostname) ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if", "retval = self.bea_host.sh_run(log, command) if (retval.cr_exit_status == 1 and retval.cr_stdout == \"\" and", "from pycoral import ssh_host from pybarreleye import barrele_collectd class BarreleAgent(): \"\"\" Each agent", "\".\" + host.sh_hostname collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\" ret = host.sh_send_file(log, fpath, etc_path) if", "1 %s\" % barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run", "self.bea_host.sh_hostname ret = utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags)) if ret: log.cl_error(\"Influxdb gets no data", "no [time] in \" \"the columns\", json_string) return -1 timestamp = int(value[time_index]) if", "version, e.g. 5.12.0.barreleye0-1.el7.x86_64 \"\"\" host = self.bea_host version = host.sh_rpm_version(log, \"collectd-\") if version", "self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The last timestamp when a measurement has", "with only one element\", json_string) return -1 serie = series[0] if \"columns\" not", "on host [%s]\", host.sh_hostname) return -1 log.cl_info(\"checking whether Influxdb can get data points", "enable_lustre_oss # Whether to collect Lustre MDS metrics from this agent. 
self.bea_enable_lustre_mds =", "ret = utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags)) if ret: log.cl_error(\"Influxdb gets no data point", "\"\"\" Stop Collectd service. \"\"\" service_name = \"collectd\" host = self.bea_host ret =", "host.sh_service_enable(log, service_name) if ret: log.cl_error(\"failed to enable service [%s] on host [%s]\", service_name,", "command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 rpm_names = retval.cr_stdout.split() rpm_fnames = []", "log.cl_debug(\"got wrong InfluxDB data [%s], no [series] in one \" \"of the result\",", "elif timestamp > self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp [%d] is not updated with query", "type [%s]\", self.bea_host.sh_hostname, cpu_target) return -1 command = (\"hostname\") retval = self.bea_host.sh_run(log, command)", "hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s] of Barreleye agent \" \"host [%s]\", hostname,", "agent. self.bea_enable_lustre_mds = enable_lustre_mds # Whether to collect Lustre client metrics from this", "[%s], \" \"ret = [%d], stdout = [%s], stderr = [%s]\", command, self.bea_host.sh_hostname,", "Barreleye \" \"agent [%s]\", self.bea_host.sh_hostname) return -1 collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if", "has unsupported distro [%s]\", self.bea_host.sh_hostname, distro) return -1 cpu_target = self.bea_host.sh_target_cpu(log) if cpu_target", "distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s] has unsupported distro [%s]\", self.bea_host.sh_hostname, distro)", "Whether to collect Lustre client metrics from this agent. 
self.bea_enable_lustre_client = enable_lustre_client #", "-1 ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Barreleye agent on", "return -1 ret = host.sh_service_enable(log, service_name) if ret: log.cl_error(\"failed to enable service [%s]", "log): \"\"\" Return the Collectd version, e.g. 5.12.0.barreleye0-1.el7.x86_64 \"\"\" host = self.bea_host version", "to RPM \" \"names on host [%s], using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version", "\"of the series\", json_string) return -1 columns = serie[\"columns\"] if \"values\" not in", "check the connection of Barreleye agent \" \"[%s] with server\", self.bea_host.sh_hostname) return -1", "None def _bea_check_connection_with_server(self, log): # The client might has problem to access Barreyele", "monitoring system for Lustre. \"\"\" import json from http import HTTPStatus from pycoral", "barreleye_instance, collectd_test=False) if collectd_config is None: log.cl_error(\"failed to generate Collectd config for production", "[%s]\", timestamp, query) return -1 def bea_influxdb_measurement_check(self, log, measurement_name, **tags): \"\"\" Check whether", "[%s] of Barreleye agent \" \"host [%s]\", hostname, self.bea_host.sh_hostname) return -1 return 0", "= collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed to config", "from this agent. self.bea_enable_disk = enable_disk # Whether to collect Lustre OSS metrics", "the # problem as early as possible. 
barreleye_server = self.bea_barreleye_server command = (\"ping", "self.bea_lustre_version = None # Collectd RPMs needed to be installed in this agent.", "-1 return 0 def bea_collectd_running(self, log): \"\"\" Check whether the Collectd is running.", "client might has problem to access Barreyele server, find the # problem as", "default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version return 0 if retval.cr_exit_status: log.cl_error(\"failed to", "collectd.conf to the agent host \"\"\" host = self.bea_host fpath = barreleye_instance.bei_workspace +", "Collectd service. \"\"\" service_name = \"collectd\" host = self.bea_host ret = host.sh_service_start(log, service_name)", "query, epoch=\"s\") if response is None: log.cl_debug(\"failed to with query Influxdb with query", "[%s]\", host.sh_hostname) return -1 ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=False) if ret: log.cl_error(\"failed to", "interval = barrele_collectd.COLLECTD_INTERVAL_TEST else: interval = barreleye_instance.bei_collect_interval collectd_config = \\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern)", "data points will be unexpected. hostname = retval.cr_stdout.strip() if hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent", "might not be uninstalled ye, so ignore # kernel RPMs. 
command = (\"rpm", "ORDER BY time DESC LIMIT 1;' % (measurement_name, tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client response", "data points from \" \"agent [%s]\", host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb", "== -1: log.cl_debug(\"got wrong InfluxDB data [%s], no [time] in \" \"the columns\",", "with query [%s]\", timestamp, query) return -1 def bea_influxdb_measurement_check(self, log, measurement_name, **tags): \"\"\"", "rpm_fnames.append(rpm_name + \".rpm\") version, _ = lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True) if version is", "= serie[\"columns\"] if \"values\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no", "has problem to access Barreyele server, find the # problem as early as", "Type: CollectdConfig self.bea_collectd_config_for_test = None # Collectd config for production. Type: CollectdConfig self.bea_collectd_config_for_production", "influxdb_client.bic_query(log, query, epoch=\"s\") if response is None: log.cl_debug(\"failed to with query Influxdb with", "disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host, barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): # Barreleye server", "needed collectd RPMs are installed for rpm_type in self.bea_needed_collectd_rpm_types: if rpm_type not in", "None: log.cl_error(\"failed to generate Collectd config for production \" \"usage\") return -1 self.bea_collectd_config_for_production", "\" \"exist\", rpm_type, self.bea_host.sh_hostname) return -1 return 0 def _bea_influxdb_measurement_check(self, log, measurement_name, tags):", "enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): # Barreleye server with thye of BarreleServer self.bea_barreleye_server = barreleye_server", "wrong InfluxDB data [%s], no [time] in \" 
\"the columns\", json_string) return -1", "log.cl_error(\"failed to send file [%s] on local host [%s] to \" \"directory [%s]", "version return 0 def _bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False): \"\"\" Generate Collectd config \"\"\"", "a measurement has been found to be updated. self.bea_influxdb_update_time = None # Collectd", "retval.cr_stdout, retval.cr_stderr) return -1 return 0 def _bea_sanity_check(self, log): \"\"\" Sanity check of", "1 if retval.cr_stdout == \"unknown\\n\": return 0 if retval.cr_stdout == \"inactive\\n\": return 0", "Return 1 if running. Return -1 if failure. \"\"\" command = \"systemctl is-active", "with query [%s]\", response.status_code, query) return -1 data = response.json() json_string = json.dumps(data,", "ret: log.cl_error(\"Influxdb doesn't have expected data points from \" \"agent [%s]\", host.sh_hostname) return", "rpm_names = retval.cr_stdout.split() rpm_fnames = [] for rpm_name in rpm_names: rpm_fnames.append(rpm_name + \".rpm\")", "HTTPStatus.OK: log.cl_debug(\"got InfluxDB status [%d] with query [%s]\", response.status_code, query) return -1 data", "barreleye_instance, test_config=True) if ret: log.cl_error(\"failed to send test config to Barreleye agent \"", "column == \"time\": time_index = i break i += 1 if time_index ==", "return 0 def _bea_influxdb_measurement_check(self, log, measurement_name, tags): # pylint: disable=bare-except,too-many-return-statements # pylint: disable=too-many-locals,too-many-branches,too-many-statements", "InfluxDB data [%s], [series] is not a \" \"array with only one element\",", "query) return -1 data = response.json() json_string = json.dumps(data, indent=4, separators=(',', ': '))", "= \"collectd\" host = self.bea_host ret = host.sh_service_start(log, service_name) if ret: log.cl_error(\"failed to", "enable_infiniband # Lustre version on this host. 
self.bea_lustre_version = None # Collectd RPMs", "return 1 if retval.cr_stdout == \"unknown\\n\": return 0 if retval.cr_stdout == \"inactive\\n\": return", "production. Type: CollectdConfig self.bea_collectd_config_for_production = None def _bea_check_connection_with_server(self, log): # The client might", "Whether to collect Lustre MDS metrics from this agent. self.bea_enable_lustre_mds = enable_lustre_mds #", "[barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The last timestamp when a measurement has been found to", "return -1 return 0 def bea_collectd_version(self, log): \"\"\" Return the Collectd version, e.g.", "if len(serie_values) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [values] is not a", "return 0 def _bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\" Check the Lustre version according to", "on Barreleye \" \"agent [%s]\", self.bea_host.sh_hostname) return -1 collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True)", "= [%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 rpm_names", "\"collectd\" ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Collectd service on", "serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [columns] in one \" \"of the", "[%s]\", host.sh_hostname) return -1 ret = host.sh_service_enable(log, service_name) if ret: log.cl_error(\"failed to enable", "= self.bea_collectd_config_for_test else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production fpath += \".\" +", "time_index == -1: log.cl_debug(\"got wrong InfluxDB data [%s], no [time] in \" \"the", "lustre | grep -v kernel\") retval = self.bea_host.sh_run(log, command) if (retval.cr_exit_status == 1", "self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye agent host [%s] is insane\", 
self.bea_host.sh_hostname) return -1 ret", "= enable_infiniband # Lustre version on this host. self.bea_lustre_version = None # Collectd", "\"\"\" host = self.bea_host version = host.sh_rpm_version(log, \"collectd-\") if version is None: log.cl_error(\"failed", "data [%s], no [columns] in one \" \"of the series\", json_string) return -1", "cpu_target != \"x86_64\": log.cl_error(\"host [%s] has unsupported CPU type [%s]\", self.bea_host.sh_hostname, cpu_target) return", "log.cl_error(\"failed to generate Collectd config for test\") return -1 self.bea_collectd_config_for_test = collectd_config collectd_config", "a \" \"array with only one element\", json_string) return -1 result = results[0]", "return 0 def bea_collectd_version(self, log): \"\"\" Return the Collectd version, e.g. 5.12.0.barreleye0-1.el7.x86_64 \"\"\"", "to get target cpu on host [%s]\", self.bea_host.sh_hostname) return -1 if cpu_target !=", "if response.status_code != HTTPStatus.OK: log.cl_debug(\"got InfluxDB status [%d] with query [%s]\", response.status_code, query)", "barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM [%s] of agent [%s] does not \" \"exist\", rpm_type,", "+= (\" %s = '%s'\" % (key, value)) query = ('SELECT * FROM", "gets no data point for measurement [%s] \" \"from agent [%s]\", measurement_name, self.bea_host.sh_hostname)", "agent on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_enable(log, service_name) if ret:", "'%s'\" % (key, value)) query = ('SELECT * FROM \"%s\"%s ORDER BY time", "to with query Influxdb with query [%s]\", query) return -1 if response.status_code !=", "[%d] is not updated with query [%s]\", timestamp, query) return -1 def bea_influxdb_measurement_check(self,", "self.bea_host.sh_hostname) return -1 return 0 def _bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\" Check the Lustre", "etc_path = \"/etc/collectd.conf\" ret = host.sh_send_file(log, fpath, etc_path) if ret: log.cl_error(\"failed to 
send", "log.cl_error(\"host [%s] has unsupported CPU type [%s]\", self.bea_host.sh_hostname, cpu_target) return -1 command =", "\" \"the columns\", json_string) return -1 timestamp = int(value[time_index]) if self.bea_influxdb_update_time is None:", "retval.cr_stderr) return -1 return 0 def _bea_sanity_check(self, log): \"\"\" Sanity check of the", "collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if collectd_config is None: log.cl_error(\"failed to generate Collectd", "from this agent. self.bea_enable_lustre_client = enable_lustre_client # Whether to collect Infiniband metrics from", "= [%d], stdout = [%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr)", "of the data points will be unexpected. hostname = retval.cr_stdout.strip() if hostname !=", "collectd_config = self.bea_collectd_config_for_production fpath += \".\" + host.sh_hostname collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\" ret", "# kernel RPMs. 
command = (\"rpm -qa | grep lustre | grep -v", "barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed to check the Lustre version on Barreleye \" \"agent", "[%s], [series] is not a \" \"array with only one element\", json_string) return", "pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host, barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): # Barreleye", "value = serie_values[0] time_index = -1 i = 0 for column in columns:", "= \" WHERE\" tag_string += (\" %s = '%s'\" % (key, value)) query", "data [%s], no [series] in one \" \"of the result\", json_string) return -1", "= [] for rpm_name in rpm_names: rpm_fnames.append(rpm_name + \".rpm\") version, _ = lustre_version.match_lustre_version_from_rpms(log,", "in tags.items(): if tag_string != \"\": tag_string += \" AND\" else: tag_string =", "problem to access Barreyele server, find the # problem as early as possible.", "data: log.cl_debug(\"got wrong InfluxDB data [%s], no [results]\", json_string) return -1 results =", "\"host [%s]\", hostname, self.bea_host.sh_hostname) return -1 return 0 def _bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\"", "if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host [%s], \" \"ret =", "ret: log.cl_error(\"Influxdb gets no data point for measurement [%s] \" \"from agent [%s]\",", "failure. \"\"\" command = \"systemctl is-active collectd\" retval = self.bea_host.sh_run(log, command) if retval.cr_stdout", "metrics from this agent. self.bea_enable_infiniband = enable_infiniband # Lustre version on this host.", "None: self.bea_influxdb_update_time = timestamp elif timestamp > self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp [%d] is", "')) log.cl_debug(\"data: [%s]\", json_string) if \"results\" not in data: log.cl_debug(\"got wrong InfluxDB data", "Stop Collectd service. 
\"\"\" service_name = \"collectd\" host = self.bea_host ret = host.sh_service_stop(log,", "Barreleye is a performance monitoring system for Lustre. \"\"\" import json from http", "InfluxDB data [%s], no [columns] in one \" \"of the series\", json_string) return", "return 0 def bea_collectd_start(self, log): \"\"\" Start Collectd service. \"\"\" service_name = \"collectd\"", "collectd_config is None: log.cl_error(\"failed to generate Collectd config for production \" \"usage\") return", "[%d] with query [%s]\", response.status_code, query) return -1 data = response.json() json_string =", "return -1 if cpu_target != \"x86_64\": log.cl_error(\"host [%s] has unsupported CPU type [%s]\",", "if cpu_target != \"x86_64\": log.cl_error(\"host [%s] has unsupported CPU type [%s]\", self.bea_host.sh_hostname, cpu_target)", "command) if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host [%s], \" \"ret", "of BarreleServer self.bea_barreleye_server = barreleye_server # Host to run commands. self.bea_host = host", "': ')) log.cl_debug(\"data: [%s]\", json_string) if \"results\" not in data: log.cl_debug(\"got wrong InfluxDB", "bea_collectd_start(self, log): \"\"\" Start Collectd service. 
\"\"\" service_name = \"collectd\" host = self.bea_host", "test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config =", "-v kernel\") retval = self.bea_host.sh_run(log, command) if (retval.cr_exit_status == 1 and retval.cr_stdout ==", "config for test\") return -1 self.bea_collectd_config_for_test = collectd_config collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False)", "indent=4, separators=(',', ': ')) log.cl_debug(\"data: [%s]\", json_string) if \"results\" not in data: log.cl_debug(\"got", "version is None: log.cl_error(\"failed to get the Collectd RPM version on host [%s]\",", "return -1 value = serie_values[0] time_index = -1 i = 0 for column", "# Collectd config for production. Type: CollectdConfig self.bea_collectd_config_for_production = None def _bea_check_connection_with_server(self, log):", "\"\"\" host = self.bea_host log.cl_info(\"configuring Collectd on host [%s]\", host.sh_hostname) ret = self.bea_collectd_send_config(log,", "\"\"\" # pylint: disable=too-many-return-statements,too-many-branches # Old Lustre kernel RPM might not be uninstalled", "[results]\", json_string) return -1 results = data[\"results\"] if len(results) != 1: log.cl_debug(\"got wrong", "_bea_sanity_check(self, log): \"\"\" Sanity check of the host before installation \"\"\" ret =", "barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret = collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version,", "ret = host.sh_service_enable(log, service_name) if ret: log.cl_error(\"failed to enable service [%s] on host", "(measurement_name, tags)) if ret: log.cl_error(\"Influxdb gets no data point for measurement [%s] \"", "if collectd_test: interval = 
barrele_collectd.COLLECTD_INTERVAL_TEST else: interval = barreleye_instance.bei_collect_interval collectd_config = \\ barrele_collectd.CollectdConfig(self,", "i += 1 if time_index == -1: log.cl_debug(\"got wrong InfluxDB data [%s], no", "command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 # If the hostname is inconsistent", "collect Infiniband metrics from this agent. self.bea_enable_infiniband = enable_infiniband # Lustre version on", "self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if ret: log.cl_error(\"failed to send test config to Barreleye agent", "-1 self.bea_collectd_config_for_test = collectd_config collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if collectd_config is None:", "self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log, query, epoch=\"s\") if response is None: log.cl_debug(\"failed to with", "def bea_collectd_running(self, log): \"\"\" Check whether the Collectd is running. 
Return 1 if", "can get data points from \" \"agent [%s]\", host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log) if", "one \" \"of the result\", json_string) return -1 series = result[\"series\"] if len(series)", "\"names on host [%s], using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version else:", "ret: log.cl_error(\"failed to restart Collectd service on host [%s]\", host.sh_hostname) return -1 log.cl_info(\"checking", "type \"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host, barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False,", "None: log.cl_error(\"failed to get target cpu on host [%s]\", self.bea_host.sh_hostname) return -1 if", "+= barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production fpath", "data = response.json() json_string = json.dumps(data, indent=4, separators=(',', ': ')) log.cl_debug(\"data: [%s]\", json_string)", "return -1 columns = serie[\"columns\"] if \"values\" not in serie: log.cl_debug(\"got wrong InfluxDB", "so ignore # kernel RPMs. 
command = (\"rpm -qa | grep lustre |", "-1 log.cl_info(\"checking whether Influxdb can get data points from \" \"agent [%s]\", host.sh_hostname)", "serie_values = serie[\"values\"] if len(serie_values) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [values]", "[%s], no [results]\", json_string) return -1 results = data[\"results\"] if len(results) != 1:", "tags: tags[\"fqdn\"] = self.bea_host.sh_hostname ret = utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags)) if ret: log.cl_error(\"Influxdb", "tag_string = \" WHERE\" tag_string += (\" %s = '%s'\" % (key, value))", "self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 return 0 def _bea_sanity_check(self, log): \"\"\" Sanity", "retval.cr_stdout, retval.cr_stderr) return -1 # If the hostname is inconsistent with the configured", "= serie_values[0] time_index = -1 i = 0 for column in columns: if", "Collectd RPMs needed to be installed in this agent. self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME,", "not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [columns] in one \"", "if running. Return -1 if failure. 
\"\"\" command = \"systemctl is-active collectd\" retval", "if len(series) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [series] is not a", "disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether the datapoint is recieved by InfluxDB \"\"\" tag_string =", "service_name = \"collectd\" ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Collectd", "json from http import HTTPStatus from pycoral import utils from pycoral import lustre_version", "is None: log.cl_error(\"failed to get target cpu on host [%s]\", self.bea_host.sh_hostname) return -1", "host = self.bea_host ret = host.sh_service_stop(log, service_name) if ret: log.cl_error(\"failed to stop [%s]", "enable_lustre_client=False, enable_infiniband=False): # Barreleye server with thye of BarreleServer self.bea_barreleye_server = barreleye_server #", "return 0 if retval.cr_stdout == \"inactive\\n\": return 0 log.cl_error(\"unexpected stdout of command [%s]", "on this host. self.bea_lustre_version = None # Collectd RPMs needed to be installed", "break i += 1 if time_index == -1: log.cl_debug(\"got wrong InfluxDB data [%s],", "\" AND\" else: tag_string = \" WHERE\" tag_string += (\" %s = '%s'\"", "self.bea_host.sh_hostname, cpu_target) return -1 command = (\"hostname\") retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status:", "bea_collectd_version(self, log): \"\"\" Return the Collectd version, e.g. 
5.12.0.barreleye0-1.el7.x86_64 \"\"\" host = self.bea_host", "= \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The last timestamp when a measurement has been", "fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production", "return 0 def _bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False): \"\"\" Generate Collectd config \"\"\" if", "bea_collectd_running(self, log): \"\"\" Check whether the Collectd is running. Return 1 if running.", "results[0] if \"series\" not in result: log.cl_debug(\"got wrong InfluxDB data [%s], no [series]", "one \" \"of the series\", json_string) return -1 columns = serie[\"columns\"] if \"values\"", "# Collectd config for test. Type: CollectdConfig self.bea_collectd_config_for_test = None # Collectd config", "no [values] in one \" \"of the series\", json_string) return -1 serie_values =", "agent [%s]\", measurement_name, self.bea_host.sh_hostname) return -1 return 0 def bea_collectd_send_config(self, log, barreleye_instance, test_config=False):", "= self.bea_host log.cl_info(\"configuring Collectd on host [%s]\", host.sh_hostname) ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=True)", "\"\"\" Each agent has an object of this type \"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes", "barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret = collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds,", "if \"results\" not in data: log.cl_debug(\"got wrong InfluxDB data [%s], no [results]\", json_string)", "e.g. 
5.12.0.barreleye0-1.el7.x86_64 \"\"\" host = self.bea_host version = host.sh_rpm_version(log, \"collectd-\") if version is", "= [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 def bea_collectd_stop(self, log): \"\"\"", "if version is None: log.cl_warning(\"failed to match Lustre version according to RPM \"", "return -1 if response.status_code != HTTPStatus.OK: log.cl_debug(\"got InfluxDB status [%d] with query [%s]\",", "\"\"\" import json from http import HTTPStatus from pycoral import utils from pycoral", "log.cl_error(\"needed Collectd RPM [%s] of agent [%s] does not \" \"exist\", rpm_type, self.bea_host.sh_hostname)", "is None: self.bea_influxdb_update_time = timestamp elif timestamp > self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp [%d]", "connection of Barreleye agent \" \"[%s] with server\", self.bea_host.sh_hostname) return -1 distro =", "log.cl_debug(\"got wrong InfluxDB data [%s], [results] is not a \" \"array with only", "send test config to Barreleye agent \" \"on host [%s]\", self.bea_host.sh_hostname) return -1", "-1 rpm_names = retval.cr_stdout.split() rpm_fnames = [] for rpm_name in rpm_names: rpm_fnames.append(rpm_name +", "[values] in one \" \"of the series\", json_string) return -1 serie_values = serie[\"values\"]", "DESC LIMIT 1;' % (measurement_name, tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log, query,", "in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s] has unsupported distro [%s]\", self.bea_host.sh_hostname, distro) return -1", "InfluxDB data [%s], no [values] in one \" \"of the series\", json_string) return", "as possible. 
barreleye_server = self.bea_barreleye_server command = (\"ping -c 1 %s\" % barreleye_server.bes_server_host.sh_hostname)", "\"\"\" if collectd_test: interval = barrele_collectd.COLLECTD_INTERVAL_TEST else: interval = barreleye_instance.bei_collect_interval collectd_config = \\", "[%s]\", hostname, self.bea_host.sh_hostname) return -1 return 0 def _bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\" Check", "Collectd on host [%s]\", host.sh_hostname) ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if ret: log.cl_error(\"failed", "[%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_start(self, log): \"\"\" Start Collectd", "tag_string += (\" %s = '%s'\" % (key, value)) query = ('SELECT *", "self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 # If the hostname is inconsistent with", "response.status_code != HTTPStatus.OK: log.cl_debug(\"got InfluxDB status [%d] with query [%s]\", response.status_code, query) return", "test config to Barreleye agent \" \"on host [%s]\", self.bea_host.sh_hostname) return -1 service_name", "on host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_running(self, log): \"\"\"", "in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM [%s] of agent [%s] does not \" \"exist\",", "\" \"agent on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_restart(log, service_name) if", "self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if collectd_config is None: log.cl_error(\"failed to generate Collectd config for", "\"collectd\" host = self.bea_host ret = host.sh_service_stop(log, service_name) if ret: log.cl_error(\"failed to stop", "[%s]\", host.sh_hostname) return -1 ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart", "from this agent. 
self.bea_enable_lustre_oss = enable_lustre_oss # Whether to collect Lustre MDS metrics", "+ host.sh_hostname collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\" ret = host.sh_send_file(log, fpath, etc_path) if ret:", "= host.sh_service_stop(log, service_name) if ret: log.cl_error(\"failed to stop [%s] service on agent host", "= host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Collectd service on host [%s]\",", "= enable_lustre_oss # Whether to collect Lustre MDS metrics from this agent. self.bea_enable_lustre_mds", "Start Collectd service. \"\"\" service_name = \"collectd\" host = self.bea_host ret = host.sh_service_start(log,", "host \"\"\" host = self.bea_host fpath = barreleye_instance.bei_workspace + \"/\" if test_config: fpath", "\" \"host [%s]\", hostname, self.bea_host.sh_hostname) return -1 return 0 def _bea_check_lustre_version(self, log, lustre_fallback_version):", "host.sh_hostname) return -1 ret = host.sh_service_enable(log, service_name) if ret: log.cl_error(\"failed to enable service", "and retval.cr_stdout == \"\" and retval.cr_stderr == \"\"): log.cl_info(\"Lustre RPM is not installed", "# If the hostname is inconsistent with the configured hostname, # fqdn tag", "if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM [%s] of agent [%s] does", "def bea_collectd_version(self, log): \"\"\" Return the Collectd version, e.g. 5.12.0.barreleye0-1.el7.x86_64 \"\"\" host =", "collectd_test=False): \"\"\" Generate Collectd config \"\"\" if collectd_test: interval = barrele_collectd.COLLECTD_INTERVAL_TEST else: interval", "pybarreleye import barrele_collectd class BarreleAgent(): \"\"\" Each agent has an object of this", "to collect Lustre MDS metrics from this agent. self.bea_enable_lustre_mds = enable_lustre_mds # Whether", "to access Barreyele server, find the # problem as early as possible. 
barreleye_server", "timestamp = int(value[time_index]) if self.bea_influxdb_update_time is None: self.bea_influxdb_update_time = timestamp elif timestamp >", "RPMs are installed for rpm_type in self.bea_needed_collectd_rpm_types: if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed", "(measurement_name, tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log, query, epoch=\"s\") if response is", "[%s], using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version else: log.cl_info(\"detected Lustre version", "series = result[\"series\"] if len(series) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [series]", "host.sh_hostname) return -1 return 0 def bea_config_agent(self, log, barreleye_instance): \"\"\" Configure agent \"\"\"", "import barrele_collectd class BarreleAgent(): \"\"\" Each agent has an object of this type", "\"\"\" service_name = \"collectd\" host = self.bea_host ret = host.sh_service_stop(log, service_name) if ret:", "ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if ret: log.cl_error(\"failed to send test config to", "send final Collectd config to Barreleye \" \"agent on host [%s]\", host.sh_hostname) return", "self.bea_host.sh_hostname) return -1 ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed to check the", "possible. 
barreleye_server = self.bea_barreleye_server command = (\"ping -c 1 %s\" % barreleye_server.bes_server_host.sh_hostname) retval", "collectd_config # Check that needed collectd RPMs are installed for rpm_type in self.bea_needed_collectd_rpm_types:", "Barreleye agent \" \"[%s] with server\", self.bea_host.sh_hostname) return -1 distro = self.bea_host.sh_distro(log) if", "0 if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host [%s], \" \"ret", "self.bea_host.sh_hostname) return -1 service_name = \"collectd\" ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed", "return -1 data = response.json() json_string = json.dumps(data, indent=4, separators=(',', ': ')) log.cl_debug(\"data:", "[%s]\", query) return -1 if response.status_code != HTTPStatus.OK: log.cl_debug(\"got InfluxDB status [%d] with", "\"results\" not in data: log.cl_debug(\"got wrong InfluxDB data [%s], no [results]\", json_string) return", "service_name) if ret: log.cl_error(\"failed to start [%s] service on agent host [%s]\", service_name,", "self.bea_collectd_config_for_production = None def _bea_check_connection_with_server(self, log): # The client might has problem to", "be uninstalled ye, so ignore # kernel RPMs. 
command = (\"rpm -qa |", "self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret = collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if", "The client might has problem to access Barreyele server, find the # problem", "self.bea_host.sh_run(log, command) if (retval.cr_exit_status == 1 and retval.cr_stdout == \"\" and retval.cr_stderr ==", "barrele_collectd.COLLECTD_TYPE_NAME] # The last timestamp when a measurement has been found to be", "log, barreleye_instance, collectd_test=False): \"\"\" Generate Collectd config \"\"\" if collectd_test: interval = barrele_collectd.COLLECTD_INTERVAL_TEST", "host = self.bea_host version = host.sh_rpm_version(log, \"collectd-\") if version is None: log.cl_error(\"failed to", "[values] is not a \" \"array with only one element\", json_string) return -1", "\"\" and retval.cr_stderr == \"\"): log.cl_info(\"Lustre RPM is not installed on host [%s],", "if ret: log.cl_error(\"failed to send final Collectd config to Barreleye \" \"agent on", "-1 return 0 def _bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\" Check the Lustre version according", "json_string) return -1 columns = serie[\"columns\"] if \"values\" not in serie: log.cl_debug(\"got wrong", "log.cl_error(\"failed to restart Collectd service on host [%s]\", host.sh_hostname) return -1 log.cl_info(\"checking whether", "= host.sh_rpm_version(log, \"collectd-\") if version is None: log.cl_error(\"failed to get the Collectd RPM", "check of the host before installation \"\"\" ret = self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed", "the collectd.conf to the agent host \"\"\" host = self.bea_host fpath = barreleye_instance.bei_workspace", 
"return 0 def bea_collectd_send_config(self, log, barreleye_instance, test_config=False): \"\"\" Dump and send the collectd.conf", "CollectdConfig self.bea_collectd_config_for_production = None def _bea_check_connection_with_server(self, log): # The client might has problem", "= '%s'\" % (key, value)) query = ('SELECT * FROM \"%s\"%s ORDER BY", "to collect Lustre client metrics from this agent. self.bea_enable_lustre_client = enable_lustre_client # Whether", "for rpm_name in rpm_names: rpm_fnames.append(rpm_name + \".rpm\") version, _ = lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True,", "self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed to check the Lustre version on Barreleye \"", "Barreleye agent \" \"host [%s]\", hostname, self.bea_host.sh_hostname) return -1 return 0 def _bea_check_lustre_version(self,", "if hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s] of Barreleye agent \" \"host [%s]\",", "ret = host.sh_service_start(log, service_name) if ret: log.cl_error(\"failed to start [%s] service on agent", "fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1 return 0 def bea_config_agent(self, log, barreleye_instance): \"\"\"", "from this agent. self.bea_enable_lustre_mds = enable_lustre_mds # Whether to collect Lustre client metrics", "return -1 serie = series[0] if \"columns\" not in serie: log.cl_debug(\"got wrong InfluxDB", "ret: log.cl_error(\"failed to send test config to Barreleye agent \" \"on host [%s]\",", "1 and retval.cr_stdout == \"\" and retval.cr_stderr == \"\"): log.cl_info(\"Lustre RPM is not", "early as possible. 
barreleye_server = self.bea_barreleye_server command = (\"ping -c 1 %s\" %", "return -1 return 0 def bea_config_agent(self, log, barreleye_instance): \"\"\" Configure agent \"\"\" host", "cpu_target is None: log.cl_error(\"failed to get target cpu on host [%s]\", self.bea_host.sh_hostname) return", "command = (\"ping -c 1 %s\" % barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log, command) if", "the Collectd version, e.g. 5.12.0.barreleye0-1.el7.x86_64 \"\"\" host = self.bea_host version = host.sh_rpm_version(log, \"collectd-\")", "hostname is inconsistent with the configured hostname, # fqdn tag of the data", "ye, so ignore # kernel RPMs. command = (\"rpm -qa | grep lustre", "-1 data = response.json() json_string = json.dumps(data, indent=4, separators=(',', ': ')) log.cl_debug(\"data: [%s]\",", "[%s], [results] is not a \" \"array with only one element\", json_string) return", "of agent [%s] does not \" \"exist\", rpm_type, self.bea_host.sh_hostname) return -1 return 0", "= self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if collectd_config is None: log.cl_error(\"failed to generate Collectd config", "= self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb doesn't have expected data points from \" \"agent", "result = results[0] if \"series\" not in result: log.cl_debug(\"got wrong InfluxDB data [%s],", "hostname = retval.cr_stdout.strip() if hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s] of Barreleye agent", "if (retval.cr_exit_status == 1 and retval.cr_stdout == \"\" and retval.cr_stderr == \"\"): log.cl_info(\"Lustre", "-1 return 0 def bea_collectd_send_config(self, log, barreleye_instance, test_config=False): \"\"\" Dump and send the", "[%s] is insane\", self.bea_host.sh_hostname) return -1 ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed", "with the configured hostname, 
# fqdn tag of the data points will be", "datapoint is recieved by InfluxDB \"\"\" tag_string = \"\" for key, value in", "WHERE\" tag_string += (\" %s = '%s'\" % (key, value)) query = ('SELECT", "result\", json_string) return -1 series = result[\"series\"] if len(series) != 1: log.cl_debug(\"got wrong", "\"from agent [%s]\", measurement_name, self.bea_host.sh_hostname) return -1 return 0 def bea_collectd_send_config(self, log, barreleye_instance,", "host before installation \"\"\" ret = self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed to check the", "barreleye_instance): \"\"\" Configure agent \"\"\" host = self.bea_host log.cl_info(\"configuring Collectd on host [%s]\",", "self.bea_barreleye_server = barreleye_server # Host to run commands. self.bea_host = host # Whether", "generate Collectd config for production \" \"usage\") return -1 self.bea_collectd_config_for_production = collectd_config #", "query) return -1 if response.status_code != HTTPStatus.OK: log.cl_debug(\"got InfluxDB status [%d] with query", "in \" \"the columns\", json_string) return -1 timestamp = int(value[time_index]) if self.bea_influxdb_update_time is", "Collectd config for production. Type: CollectdConfig self.bea_collectd_config_for_production = None def _bea_check_connection_with_server(self, log): #", "not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM [%s] of agent [%s] does not \"", "is None: log.cl_debug(\"failed to with query Influxdb with query [%s]\", query) return -1", "pylint: disable=bare-except,too-many-return-statements # pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether the datapoint is recieved by", "for column in columns: if column == \"time\": time_index = i break i", "client metrics from this agent. 
self.bea_enable_lustre_client = enable_lustre_client # Whether to collect Infiniband", "this type \"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host, barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True,", "if ret: log.cl_error(\"failed to send file [%s] on local host [%s] to \"", "be unexpected. hostname = retval.cr_stdout.strip() if hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s] of", "retval.cr_stdout == \"\" and retval.cr_stderr == \"\"): log.cl_info(\"Lustre RPM is not installed on", "-1 def bea_influxdb_measurement_check(self, log, measurement_name, **tags): \"\"\" Check whether influxdb has datapoint \"\"\"", "etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1 return 0 def bea_config_agent(self, log, barreleye_instance): \"\"\" Configure", "\" \"on host [%s]\", self.bea_host.sh_hostname) return -1 service_name = \"collectd\" ret = host.sh_service_restart(log,", "\"on host [%s]\", self.bea_host.sh_hostname) return -1 service_name = \"collectd\" ret = host.sh_service_restart(log, service_name)", "one element\", json_string) return -1 value = serie_values[0] time_index = -1 i =", "json_string) return -1 results = data[\"results\"] if len(results) != 1: log.cl_debug(\"got wrong InfluxDB", "[%s]\", self.bea_host.sh_hostname) return -1 service_name = \"collectd\" ret = host.sh_service_restart(log, service_name) if ret:", "# Lustre version on this host. 
self.bea_lustre_version = None # Collectd RPMs needed", "host [%s]\", self.bea_host.sh_hostname) return -1 service_name = \"collectd\" ret = host.sh_service_restart(log, service_name) if", "return 0 def _bea_sanity_check(self, log): \"\"\" Sanity check of the host before installation", "\" WHERE\" tag_string += (\" %s = '%s'\" % (key, value)) query =", "pylint: disable=too-many-return-statements,too-many-branches # Old Lustre kernel RPM might not be uninstalled ye, so", "in one \" \"of the series\", json_string) return -1 columns = serie[\"columns\"] if", "is insane\", self.bea_host.sh_hostname) return -1 ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed to", "json_string) return -1 serie_values = serie[\"values\"] if len(serie_values) != 1: log.cl_debug(\"got wrong InfluxDB", "match Lustre version according to RPM \" \"names on host [%s], using default", "the series\", json_string) return -1 serie_values = serie[\"values\"] if len(serie_values) != 1: log.cl_debug(\"got", "return -1 return 0 def _bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\" Check the Lustre version", "version is None: log.cl_warning(\"failed to match Lustre version according to RPM \" \"names", "not \" \"exist\", rpm_type, self.bea_host.sh_hostname) return -1 return 0 def _bea_influxdb_measurement_check(self, log, measurement_name,", "\"ret = [%d], stdout = [%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout,", "[%s]\", response.status_code, query) return -1 data = response.json() json_string = json.dumps(data, indent=4, separators=(',',", "performance monitoring system for Lustre. 
\"\"\" import json from http import HTTPStatus from", "is not a \" \"array with only one element\", json_string) return -1 result", "agent [%s] does not \" \"exist\", rpm_type, self.bea_host.sh_hostname) return -1 return 0 def", "0 for column in columns: if column == \"time\": time_index = i break", "Lustre client metrics from this agent. self.bea_enable_lustre_client = enable_lustre_client # Whether to collect", "problem as early as possible. barreleye_server = self.bea_barreleye_server command = (\"ping -c 1", "len(series) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [series] is not a \"", "\"x86_64\": log.cl_error(\"host [%s] has unsupported CPU type [%s]\", self.bea_host.sh_hostname, cpu_target) return -1 command", "= [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 # If the hostname", "RPMs \"\"\" # pylint: disable=too-many-return-statements,too-many-branches # Old Lustre kernel RPM might not be", "!= 1: log.cl_debug(\"got wrong InfluxDB data [%s], [series] is not a \" \"array", "\"\"\" tag_string = \"\" for key, value in tags.items(): if tag_string != \"\":", "0 def bea_collectd_running(self, log): \"\"\" Check whether the Collectd is running. Return 1", "== 1 and retval.cr_stdout == \"\" and retval.cr_stderr == \"\"): log.cl_info(\"Lustre RPM is", "[%s] of agent [%s] does not \" \"exist\", rpm_type, self.bea_host.sh_hostname) return -1 return", "from this agent. self.bea_enable_infiniband = enable_infiniband # Lustre version on this host. 
self.bea_lustre_version", "None: log.cl_error(\"failed to generate Collectd config for test\") return -1 self.bea_collectd_config_for_test = collectd_config", "= ('SELECT * FROM \"%s\"%s ORDER BY time DESC LIMIT 1;' % (measurement_name,", "in one \" \"of the series\", json_string) return -1 serie_values = serie[\"values\"] if", "host = self.bea_host fpath = barreleye_instance.bei_workspace + \"/\" if test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME", "response.json() json_string = json.dumps(data, indent=4, separators=(',', ': ')) log.cl_debug(\"data: [%s]\", json_string) if \"results\"", "[%s] on host [%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version = version return 0 def _bea_generate_collectd_config(self,", "lustre_fallback_version): \"\"\" Check the Lustre version according to the installed RPMs \"\"\" #", "Barreleye agent \"\"\" ret = self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye agent host [%s] is", "return -1 return 0 def bea_collectd_start(self, log): \"\"\" Start Collectd service. \"\"\" service_name", "The last timestamp when a measurement has been found to be updated. self.bea_influxdb_update_time", "host [%s]\", self.bea_host.sh_hostname) return -1 if cpu_target != \"x86_64\": log.cl_error(\"host [%s] has unsupported", "config to Barreleye \" \"agent on host [%s]\", host.sh_hostname) return -1 ret =", "-1 ret = host.sh_service_enable(log, service_name) if ret: log.cl_error(\"failed to enable service [%s] on", "None # Collectd RPMs needed to be installed in this agent. 
self.bea_needed_collectd_rpm_types =", "[%s]\", host.sh_hostname) ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if ret: log.cl_error(\"failed to send test", "host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_enable(log, service_name) if ret: log.cl_error(\"failed to", "-1 series = result[\"series\"] if len(series) != 1: log.cl_debug(\"got wrong InfluxDB data [%s],", "log.cl_warning(\"failed to match Lustre version according to RPM \" \"names on host [%s],", "pycoral import lustre_version from pycoral import ssh_host from pybarreleye import barrele_collectd class BarreleAgent():", "-1 return 0 def _bea_sanity_check(self, log): \"\"\" Sanity check of the host before", "return 0 log.cl_error(\"unexpected stdout of command [%s] on host [%s], \" \"ret =", "return -1 def bea_influxdb_measurement_check(self, log, measurement_name, **tags): \"\"\" Check whether influxdb has datapoint", "result: log.cl_debug(\"got wrong InfluxDB data [%s], no [series] in one \" \"of the", "return -1 self.bea_collectd_config_for_production = collectd_config # Check that needed collectd RPMs are installed", "measurement_name, tags): # pylint: disable=bare-except,too-many-return-statements # pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether the datapoint", "= None # Collectd config for test. 
Type: CollectdConfig self.bea_collectd_config_for_test = None #", "_bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False): \"\"\" Generate Collectd config \"\"\" if collectd_test: interval =", "measurement_name, **tags): \"\"\" Check whether influxdb has datapoint \"\"\" if \"fqdn\" not in", "BY time DESC LIMIT 1;' % (measurement_name, tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client response =", "ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Collectd service on host", "installed RPMs \"\"\" # pylint: disable=too-many-return-statements,too-many-branches # Old Lustre kernel RPM might not", "serie[\"columns\"] if \"values\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [values]", "log.cl_error(\"failed to send final Collectd config to Barreleye \" \"agent on host [%s]\",", "Collectd service. \"\"\" service_name = \"collectd\" host = self.bea_host ret = host.sh_service_stop(log, service_name)", "test_config=False) if ret: log.cl_error(\"failed to send final Collectd config to Barreleye \" \"agent", "config for production. Type: CollectdConfig self.bea_collectd_config_for_production = None def _bea_check_connection_with_server(self, log): # The", "json_string) return -1 timestamp = int(value[time_index]) if self.bea_influxdb_update_time is None: self.bea_influxdb_update_time = timestamp", "time_index = -1 i = 0 for column in columns: if column ==", "# Whether to collect Infiniband metrics from this agent. self.bea_enable_infiniband = enable_infiniband #", "is inconsistent with the configured hostname, # fqdn tag of the data points", "Check that needed collectd RPMs are installed for rpm_type in self.bea_needed_collectd_rpm_types: if rpm_type", "= enable_lustre_mds # Whether to collect Lustre client metrics from this agent. 
self.bea_enable_lustre_client", "before installation \"\"\" ret = self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed to check the connection", "None: log.cl_debug(\"failed to with query Influxdb with query [%s]\", query) return -1 if", "get data points from \" \"agent [%s]\", host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log) if ret:", "updated. self.bea_influxdb_update_time = None # Collectd config for test. Type: CollectdConfig self.bea_collectd_config_for_test =", "[%s], no [series] in one \" \"of the result\", json_string) return -1 series", "to send final Collectd config to Barreleye \" \"agent on host [%s]\", host.sh_hostname)", "not in result: log.cl_debug(\"got wrong InfluxDB data [%s], no [series] in one \"", "= None # Collectd RPMs needed to be installed in this agent. self.bea_needed_collectd_rpm_types", "host [%s], using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version else: log.cl_info(\"detected Lustre", "0 def bea_collectd_version(self, log): \"\"\" Return the Collectd version, e.g. 
5.12.0.barreleye0-1.el7.x86_64 \"\"\" host", "-1 cpu_target = self.bea_host.sh_target_cpu(log) if cpu_target is None: log.cl_error(\"failed to get target cpu", "-1: log.cl_debug(\"got wrong InfluxDB data [%s], no [time] in \" \"the columns\", json_string)", "before configuring Barreleye agent \"\"\" ret = self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye agent host", "0 if retval.cr_stdout == \"inactive\\n\": return 0 log.cl_error(\"unexpected stdout of command [%s] on", "for rpm_type in self.bea_needed_collectd_rpm_types: if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM [%s]", "if retval.cr_stdout == \"active\\n\": return 1 if retval.cr_stdout == \"unknown\\n\": return 0 if", "= self.bea_host.sh_run(log, command) if retval.cr_stdout == \"active\\n\": return 1 if retval.cr_stdout == \"unknown\\n\":", "measurement has been found to be updated. self.bea_influxdb_update_time = None # Collectd config", "= data[\"results\"] if len(results) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [results] is", "measurement [%s] \" \"from agent [%s]\", measurement_name, self.bea_host.sh_hostname) return -1 return 0 def", "measurement_name, self.bea_host.sh_hostname) return -1 return 0 def bea_collectd_send_config(self, log, barreleye_instance, test_config=False): \"\"\" Dump", "-1 service_name = \"collectd\" ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart", "wrong InfluxDB data [%s], [results] is not a \" \"array with only one", "= collectd_config # Check that needed collectd RPMs are installed for rpm_type in", "self.bea_host.sh_hostname) return -1 return 0 def bea_collectd_send_config(self, log, barreleye_instance, test_config=False): \"\"\" Dump and", "= self.bea_collectd_config_for_production fpath += \".\" + host.sh_hostname collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\" ret =", "InfluxDB data [%s], no [time] in \" \"the columns\", json_string) 
return -1 timestamp", "Host to run commands. self.bea_host = host # Whether to collect disk metrics", "> self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp [%d] is not updated with query [%s]\", timestamp,", "Check whether the Collectd is running. Return 1 if running. Return -1 if", "Lustre version according to RPM \" \"names on host [%s], using default [%s]\",", "else: tag_string = \" WHERE\" tag_string += (\" %s = '%s'\" % (key,", "test_config=False): \"\"\" Dump and send the collectd.conf to the agent host \"\"\" host", "log.cl_info(\"detected Lustre version [%s] on host [%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version = version return", "host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Barreleye agent on host [%s]\", host.sh_hostname)", "default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version else: log.cl_info(\"detected Lustre version [%s] on", "will be unexpected. hostname = retval.cr_stdout.strip() if hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s]", "system for Lustre. \"\"\" import json from http import HTTPStatus from pycoral import", "= (\"rpm -qa | grep lustre | grep -v kernel\") retval = self.bea_host.sh_run(log,", "% (measurement_name, tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log, query, epoch=\"s\") if response", "for test. Type: CollectdConfig self.bea_collectd_config_for_test = None # Collectd config for production. 
Type:", "%s = '%s'\" % (key, value)) query = ('SELECT * FROM \"%s\"%s ORDER", "ssh_host from pybarreleye import barrele_collectd class BarreleAgent(): \"\"\" Each agent has an object", "enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): # Barreleye server with thye of BarreleServer self.bea_barreleye_server", "Barreyele server, find the # problem as early as possible. barreleye_server = self.bea_barreleye_server", "None: log.cl_warning(\"failed to match Lustre version according to RPM \" \"names on host", "\"\" for key, value in tags.items(): if tag_string != \"\": tag_string += \"", "-1 result = results[0] if \"series\" not in result: log.cl_debug(\"got wrong InfluxDB data", "data [%s], [results] is not a \" \"array with only one element\", json_string)", "uninstalled ye, so ignore # kernel RPMs. command = (\"rpm -qa | grep", "-1 collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if collectd_config is None: log.cl_error(\"failed to generate", "installed in this agent. self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The last timestamp", "return -1 result = results[0] if \"series\" not in result: log.cl_debug(\"got wrong InfluxDB", "on host [%s]\", self.bea_host.sh_hostname) return -1 if cpu_target != \"x86_64\": log.cl_error(\"host [%s] has", "\"active\\n\": return 1 if retval.cr_stdout == \"unknown\\n\": return 0 if retval.cr_stdout == \"inactive\\n\":", "agent. 
self.bea_enable_lustre_client = enable_lustre_client # Whether to collect Infiniband metrics from this agent.", "no [results]\", json_string) return -1 results = data[\"results\"] if len(results) != 1: log.cl_debug(\"got", "return None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return collectd_config def bea_generate_configs(self, log, barreleye_instance): \"\"\" Steps", "Influxdb with query [%s]\", query) return -1 if response.status_code != HTTPStatus.OK: log.cl_debug(\"got InfluxDB", "= utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags)) if ret: log.cl_error(\"Influxdb gets no data point for", "-1 value = serie_values[0] time_index = -1 i = 0 for column in", "\"series\" not in result: log.cl_debug(\"got wrong InfluxDB data [%s], no [series] in one", "metrics from this agent. self.bea_enable_lustre_mds = enable_lustre_mds # Whether to collect Lustre client", "\"of the series\", json_string) return -1 serie_values = serie[\"values\"] if len(serie_values) != 1:", "serie = series[0] if \"columns\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s],", "\"the columns\", json_string) return -1 timestamp = int(value[time_index]) if self.bea_influxdb_update_time is None: self.bea_influxdb_update_time", "server, find the # problem as early as possible. 
barreleye_server = self.bea_barreleye_server command", "response.status_code, query) return -1 data = response.json() json_string = json.dumps(data, indent=4, separators=(',', ':", "ret = collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed to", "[%s] does not \" \"exist\", rpm_type, self.bea_host.sh_hostname) return -1 return 0 def _bea_influxdb_measurement_check(self,", "[%s]\", self.bea_host.sh_hostname) return -1 if cpu_target != \"x86_64\": log.cl_error(\"host [%s] has unsupported CPU", "\" \"of the result\", json_string) return -1 series = result[\"series\"] if len(series) !=", "lustre_fallback_version else: log.cl_info(\"detected Lustre version [%s] on host [%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version =", "\"\"\" Generate Collectd config \"\"\" if collectd_test: interval = barrele_collectd.COLLECTD_INTERVAL_TEST else: interval =", "-1 distro = self.bea_host.sh_distro(log) if distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s] has", "def _bea_check_connection_with_server(self, log): # The client might has problem to access Barreyele server,", "== \"time\": time_index = i break i += 1 if time_index == -1:", "[%s]\", json_string) if \"results\" not in data: log.cl_debug(\"got wrong InfluxDB data [%s], no", "= series[0] if \"columns\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no", "log): \"\"\" Start Collectd service. \"\"\" service_name = \"collectd\" host = self.bea_host ret", "= None # Collectd config for production. 
Type: CollectdConfig self.bea_collectd_config_for_production = None def", "for measurement [%s] \" \"from agent [%s]\", measurement_name, self.bea_host.sh_hostname) return -1 return 0", "0 def bea_config_agent(self, log, barreleye_instance): \"\"\" Configure agent \"\"\" host = self.bea_host log.cl_info(\"configuring", "# Check that needed collectd RPMs are installed for rpm_type in self.bea_needed_collectd_rpm_types: if", "+= barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production fpath += \".\" + host.sh_hostname collectd_config.cdc_dump(fpath) etc_path =", "command) if retval.cr_stdout == \"active\\n\": return 1 if retval.cr_stdout == \"unknown\\n\": return 0", "log.cl_debug(\"failed to with query Influxdb with query [%s]\", query) return -1 if response.status_code", "only one element\", json_string) return -1 value = serie_values[0] time_index = -1 i", "= [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 return 0 def _bea_sanity_check(self,", "[%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1 return 0 def bea_config_agent(self, log, barreleye_instance):", "[%s] to \" \"directory [%s] on host [%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return", "of command [%s] on host [%s], \" \"ret = [%d], stdout = [%s],", "if failure. \"\"\" command = \"systemctl is-active collectd\" retval = self.bea_host.sh_run(log, command) if", "run commands. 
self.bea_host = host # Whether to collect disk metrics from this", "pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether the datapoint is recieved by InfluxDB \"\"\" tag_string", "to config Lustre plugin of Collectd\") return None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return collectd_config", "final Collectd config to Barreleye \" \"agent on host [%s]\", host.sh_hostname) return -1", "enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): # Barreleye server with thye of BarreleServer self.bea_barreleye_server =", "[%s] on host [%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1 return 0 def", "the series\", json_string) return -1 columns = serie[\"columns\"] if \"values\" not in serie:", "element\", json_string) return -1 result = results[0] if \"series\" not in result: log.cl_debug(\"got", "return -1 collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if collectd_config is None: log.cl_error(\"failed to", "collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed to config Lustre", "collectd_config.cdc_plugin_infiniband() return collectd_config def bea_generate_configs(self, log, barreleye_instance): \"\"\" Steps before configuring Barreleye agent", "metrics from this agent. self.bea_enable_disk = enable_disk # Whether to collect Lustre OSS", "ignore # kernel RPMs. 
command = (\"rpm -qa | grep lustre | grep", "host.sh_hostname) return -1 ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Barreleye", "data points from \" \"agent [%s]\", host.sh_hostname) return -1 ret = self.bea_collectd_send_config(log, barreleye_instance,", "import utils from pycoral import lustre_version from pycoral import ssh_host from pybarreleye import", "and retval.cr_stderr == \"\"): log.cl_info(\"Lustre RPM is not installed on host [%s], \"", "\" \"array with only one element\", json_string) return -1 result = results[0] if", "\" \"using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version return 0 if retval.cr_exit_status:", "self.bea_host.sh_target_cpu(log) if cpu_target is None: log.cl_error(\"failed to get target cpu on host [%s]\",", "barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production fpath += \".\" + host.sh_hostname collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\"", "insane\", self.bea_host.sh_hostname) return -1 ret = self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed to check", "element\", json_string) return -1 value = serie_values[0] time_index = -1 i = 0", "with query Influxdb with query [%s]\", query) return -1 if response.status_code != HTTPStatus.OK:", "[%s]\", host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb doesn't have expected data points", "host, barreleye_server, enable_disk=False, enable_lustre_oss=True, enable_lustre_mds=True, enable_lustre_client=False, enable_infiniband=False): # Barreleye server with thye of", "enable_lustre_mds # Whether to collect Lustre client metrics from this agent. 
self.bea_enable_lustre_client =", "doesn't have expected data points from \" \"agent [%s]\", host.sh_hostname) return -1 ret", "hostname, self.bea_host.sh_hostname) return -1 return 0 def _bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\" Check the", "the configured hostname, # fqdn tag of the data points will be unexpected.", "def _bea_influxdb_measurement_check(self, log, measurement_name, tags): # pylint: disable=bare-except,too-many-return-statements # pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check", "# Whether to collect disk metrics from this agent. self.bea_enable_disk = enable_disk #", "None # Collectd config for test. Type: CollectdConfig self.bea_collectd_config_for_test = None # Collectd", "version according to the installed RPMs \"\"\" # pylint: disable=too-many-return-statements,too-many-branches # Old Lustre", "\"\"\" service_name = \"collectd\" host = self.bea_host ret = host.sh_service_start(log, service_name) if ret:", "retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 rpm_names = retval.cr_stdout.split() rpm_fnames = [] for rpm_name", "data [%s], no [values] in one \" \"of the series\", json_string) return -1", "in columns: if column == \"time\": time_index = i break i += 1", "1 if time_index == -1: log.cl_debug(\"got wrong InfluxDB data [%s], no [time] in", "[%s], \" \"using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version return 0 if", "log.cl_debug(\"got wrong InfluxDB data [%s], no [columns] in one \" \"of the series\",", "= lustre_fallback_version return 0 if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host", "host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to", "_bea_influxdb_measurement_check(self, log, measurement_name, tags): # pylint: disable=bare-except,too-many-return-statements # pylint: 
disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether", "[%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version = version return 0 def _bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False):", "i break i += 1 if time_index == -1: log.cl_debug(\"got wrong InfluxDB data", "[%s] service on agent host [%s]\", service_name, host.sh_hostname) return -1 return 0 def", "from pybarreleye import barrele_collectd class BarreleAgent(): \"\"\" Each agent has an object of", "return -1 cpu_target = self.bea_host.sh_target_cpu(log) if cpu_target is None: log.cl_error(\"failed to get target", "self.bea_host.sh_run(log, command) if retval.cr_stdout == \"active\\n\": return 1 if retval.cr_stdout == \"unknown\\n\": return", "timestamp when a measurement has been found to be updated. self.bea_influxdb_update_time = None", "in data: log.cl_debug(\"got wrong InfluxDB data [%s], no [results]\", json_string) return -1 results", "of Barreleye agent \" \"[%s] with server\", self.bea_host.sh_hostname) return -1 distro = self.bea_host.sh_distro(log)", "log, lustre_fallback_version): \"\"\" Check the Lustre version according to the installed RPMs \"\"\"", "RPM is not installed on host [%s], \" \"using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name)", "if self.bea_influxdb_update_time is None: self.bea_influxdb_update_time = timestamp elif timestamp > self.bea_influxdb_update_time: return 0", "log.cl_error(\"failed to config Lustre plugin of Collectd\") return None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return", "updated with query [%s]\", timestamp, query) return -1 def bea_influxdb_measurement_check(self, log, measurement_name, **tags):", "separators=(',', ': ')) log.cl_debug(\"data: [%s]\", json_string) if \"results\" not in data: log.cl_debug(\"got wrong", "log.cl_error(\"unexpected stdout of command [%s] on host [%s], \" \"ret = [%d], stdout", 
"self.bea_host ret = host.sh_service_start(log, service_name) if ret: log.cl_error(\"failed to start [%s] service on", "file [%s] on local host [%s] to \" \"directory [%s] on host [%s]\",", "agent. self.bea_enable_lustre_oss = enable_lustre_oss # Whether to collect Lustre MDS metrics from this", "\"\"\" Return the Collectd version, e.g. 5.12.0.barreleye0-1.el7.x86_64 \"\"\" host = self.bea_host version =", "!= 1: log.cl_debug(\"got wrong InfluxDB data [%s], [values] is not a \" \"array", "is None: log.cl_warning(\"failed to match Lustre version according to RPM \" \"names on", "[ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s] has unsupported distro [%s]\", self.bea_host.sh_hostname, distro) return -1 cpu_target", "time DESC LIMIT 1;' % (measurement_name, tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log,", "retval.cr_stdout == \"active\\n\": return 1 if retval.cr_stdout == \"unknown\\n\": return 0 if retval.cr_stdout", "from http import HTTPStatus from pycoral import utils from pycoral import lustre_version from", "lustre_version from pycoral import ssh_host from pybarreleye import barrele_collectd class BarreleAgent(): \"\"\" Each", "\"collectd-\") if version is None: log.cl_error(\"failed to get the Collectd RPM version on", "the datapoint is recieved by InfluxDB \"\"\" tag_string = \"\" for key, value", "response is None: log.cl_debug(\"failed to with query Influxdb with query [%s]\", query) return", "fpath += \".\" + host.sh_hostname collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\" ret = host.sh_send_file(log, fpath,", "ret = host.sh_service_stop(log, service_name) if ret: log.cl_error(\"failed to stop [%s] service on agent", "-1 i = 0 for column in columns: if column == \"time\": time_index", "interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret = 
collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss,", "point for measurement [%s] \" \"from agent [%s]\", measurement_name, self.bea_host.sh_hostname) return -1 return", "0 def _bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\" Check the Lustre version according to the", "self.bea_influxdb_update_time = None # Collectd config for test. Type: CollectdConfig self.bea_collectd_config_for_test = None", "\" \"agent [%s]\", host.sh_hostname) return -1 ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=False) if ret:", "RPM might not be uninstalled ye, so ignore # kernel RPMs. command =", "= enable_lustre_client # Whether to collect Infiniband metrics from this agent. self.bea_enable_infiniband =", "return -1 command = (\"hostname\") retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to", "stop [%s] service on agent host [%s]\", service_name, host.sh_hostname) return -1 return 0", "recieved by InfluxDB \"\"\" tag_string = \"\" for key, value in tags.items(): if", "host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Collectd service on host [%s]\", host.sh_hostname)", "enable_lustre_client # Whether to collect Infiniband metrics from this agent. 
self.bea_enable_infiniband = enable_infiniband", "if \"values\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [values] in", "if collectd_config is None: log.cl_error(\"failed to generate Collectd config for production \" \"usage\")", "or self.bea_enable_lustre_client): ret = collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret:", "of Collectd\") return None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return collectd_config def bea_generate_configs(self, log, barreleye_instance):", "in self.bea_needed_collectd_rpm_types: if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM [%s] of agent", "_ = lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True) if version is None: log.cl_warning(\"failed to match", "utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags)) if ret: log.cl_error(\"Influxdb gets no data point for measurement", "log.cl_error(\"failed to start [%s] service on agent host [%s]\", service_name, host.sh_hostname) return -1", "= self.bea_host version = host.sh_rpm_version(log, \"collectd-\") if version is None: log.cl_error(\"failed to get", "= 0 for column in columns: if column == \"time\": time_index = i", "MDS metrics from this agent. 
self.bea_enable_lustre_mds = enable_lustre_mds # Whether to collect Lustre", "according to the installed RPMs \"\"\" # pylint: disable=too-many-return-statements,too-many-branches # Old Lustre kernel", "have expected data points from \" \"agent [%s]\", host.sh_hostname) return -1 ret =", "Old Lustre kernel RPM might not be uninstalled ye, so ignore # kernel", "json_string) return -1 value = serie_values[0] time_index = -1 i = 0 for", "installed for rpm_type in self.bea_needed_collectd_rpm_types: if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM", "[%s] on local host [%s] to \" \"directory [%s] on host [%s]\", fpath,", "# Whether to collect Lustre OSS metrics from this agent. self.bea_enable_lustre_oss = enable_lustre_oss", "return -1 results = data[\"results\"] if len(results) != 1: log.cl_debug(\"got wrong InfluxDB data", "a \" \"array with only one element\", json_string) return -1 value = serie_values[0]", "[%s]\", self.bea_host.sh_hostname, distro) return -1 cpu_target = self.bea_host.sh_target_cpu(log) if cpu_target is None: log.cl_error(\"failed", "from \" \"agent [%s]\", host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb doesn't have", "enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed to config Lustre plugin of Collectd\") return None", "to run command [%s] on host [%s], \" \"ret = [%d], stdout =", "to Barreleye agent \" \"on host [%s]\", self.bea_host.sh_hostname) return -1 service_name = \"collectd\"", "wrong InfluxDB data [%s], no [values] in one \" \"of the series\", json_string)", "= barreleye_instance.bei_collect_interval collectd_config = \\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or", "log, measurement_name, tags): # pylint: 
disable=bare-except,too-many-return-statements # pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether the", "not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [values] in one \"", "the installed RPMs \"\"\" # pylint: disable=too-many-return-statements,too-many-branches # Old Lustre kernel RPM might", "Barreleye \" \"agent on host [%s]\", host.sh_hostname) return -1 ret = host.sh_service_restart(log, service_name)", "host.sh_hostname collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\" ret = host.sh_send_file(log, fpath, etc_path) if ret: log.cl_error(\"failed", "wrong InfluxDB data [%s], [values] is not a \" \"array with only one", "= self.bea_host.sh_distro(log) if distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s] has unsupported distro", "plugin of Collectd\") return None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return collectd_config def bea_generate_configs(self, log,", "in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [values] in one \" \"of", "= host.sh_send_file(log, fpath, etc_path) if ret: log.cl_error(\"failed to send file [%s] on local", "agent. 
self.bea_enable_disk = enable_disk # Whether to collect Lustre OSS metrics from this", "else: interval = barreleye_instance.bei_collect_interval collectd_config = \\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or", "\"of the result\", json_string) return -1 series = result[\"series\"] if len(series) != 1:", "\"agent [%s]\", host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb doesn't have expected data", "[columns] in one \" \"of the series\", json_string) return -1 columns = serie[\"columns\"]", "| grep -v kernel\") retval = self.bea_host.sh_run(log, command) if (retval.cr_exit_status == 1 and", "\"/etc/collectd.conf\" ret = host.sh_send_file(log, fpath, etc_path) if ret: log.cl_error(\"failed to send file [%s]", "Dump and send the collectd.conf to the agent host \"\"\" host = self.bea_host", "data [%s], no [time] in \" \"the columns\", json_string) return -1 timestamp =", "a \" \"array with only one element\", json_string) return -1 serie = series[0]", "self.bea_needed_collectd_rpm_types: if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM [%s] of agent [%s]", "= self.bea_host ret = host.sh_service_stop(log, service_name) if ret: log.cl_error(\"failed to stop [%s] service", "= host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Barreleye agent on host [%s]\",", "Barreleye server with thye of BarreleServer self.bea_barreleye_server = barreleye_server # Host to run", "service_name) if ret: log.cl_error(\"failed to enable service [%s] on host [%s]\", service_name, host.sh_hostname)", "collect Lustre OSS metrics from this agent. 
self.bea_enable_lustre_oss = enable_lustre_oss # Whether to", "host [%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version = version return 0 def _bea_generate_collectd_config(self, log, barreleye_instance,", "tags.items(): if tag_string != \"\": tag_string += \" AND\" else: tag_string = \"", "host.sh_service_start(log, service_name) if ret: log.cl_error(\"failed to start [%s] service on agent host [%s]\",", "barreleye_instance): \"\"\" Steps before configuring Barreleye agent \"\"\" ret = self._bea_sanity_check(log) if ret:", "+ \"/\" if test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test else: fpath +=", "= self._bea_check_lustre_version(log, barreleye_instance.bei_lustre_fallback_version) if ret: log.cl_error(\"failed to check the Lustre version on Barreleye", "data [%s], no [results]\", json_string) return -1 results = data[\"results\"] if len(results) !=", "is None: log.cl_error(\"failed to get the Collectd RPM version on host [%s]\", host.sh_hostname)", "# Collectd RPMs needed to be installed in this agent. self.bea_needed_collectd_rpm_types = \\", "if ret: log.cl_error(\"failed to check the connection of Barreleye agent \" \"[%s] with", "if ret: log.cl_error(\"failed to restart Collectd service on host [%s]\", host.sh_hostname) return -1", "log.cl_debug(\"got wrong InfluxDB data [%s], no [values] in one \" \"of the series\",", "host [%s]\", host.sh_hostname) ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if ret: log.cl_error(\"failed to send", "collect disk metrics from this agent. 
self.bea_enable_disk = enable_disk # Whether to collect", "or self.bea_enable_lustre_mds or self.bea_enable_lustre_client): ret = collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt)", "[%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 # If", "[%s] has unsupported CPU type [%s]\", self.bea_host.sh_hostname, cpu_target) return -1 command = (\"hostname\")", "# Whether to collect Lustre MDS metrics from this agent. self.bea_enable_lustre_mds = enable_lustre_mds", "if \"columns\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [columns] in", "if time_index == -1: log.cl_debug(\"got wrong InfluxDB data [%s], no [time] in \"", "retval.cr_stdout.split() rpm_fnames = [] for rpm_name in rpm_names: rpm_fnames.append(rpm_name + \".rpm\") version, _", "bea_collectd_stop(self, log): \"\"\" Stop Collectd service. 
\"\"\" service_name = \"collectd\" host = self.bea_host", "(\"rpm -qa | grep lustre | grep -v kernel\") retval = self.bea_host.sh_run(log, command)", "= [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 rpm_names = retval.cr_stdout.split() rpm_fnames", "lustre_fallback_version return 0 if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host [%s],", "stdout = [%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1", "if \"series\" not in result: log.cl_debug(\"got wrong InfluxDB data [%s], no [series] in", "query = ('SELECT * FROM \"%s\"%s ORDER BY time DESC LIMIT 1;' %", "config Lustre plugin of Collectd\") return None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return collectd_config def", "return -1 return 0 def _bea_influxdb_measurement_check(self, log, measurement_name, tags): # pylint: disable=bare-except,too-many-return-statements #", "% barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run command [%s]", "import HTTPStatus from pycoral import utils from pycoral import lustre_version from pycoral import", "log.cl_debug(\"got InfluxDB status [%d] with query [%s]\", response.status_code, query) return -1 data =", "1: log.cl_debug(\"got wrong InfluxDB data [%s], [results] is not a \" \"array with", "commands. self.bea_host = host # Whether to collect disk metrics from this agent.", "agent. Barreleye is a performance monitoring system for Lustre. \"\"\" import json from", "server\", self.bea_host.sh_hostname) return -1 distro = self.bea_host.sh_distro(log) if distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]:", "BarreleServer self.bea_barreleye_server = barreleye_server # Host to run commands. 
self.bea_host = host #", "[%s]\", self.bea_host.sh_hostname, cpu_target) return -1 command = (\"hostname\") retval = self.bea_host.sh_run(log, command) if", "def bea_generate_configs(self, log, barreleye_instance): \"\"\" Steps before configuring Barreleye agent \"\"\" ret =", "version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version = version return 0 def _bea_generate_collectd_config(self, log, barreleye_instance, collectd_test=False): \"\"\"", "test\") return -1 self.bea_collectd_config_for_test = collectd_config collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=False) if collectd_config", "[%d], stdout = [%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return", "[%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 return 0", "+ \".rpm\") version, _ = lustre_version.match_lustre_version_from_rpms(log, rpm_fnames, skip_kernel=True, skip_test=True) if version is None:", "if ret: log.cl_error(\"Influxdb gets no data point for measurement [%s] \" \"from agent", "whether the Collectd is running. Return 1 if running. Return -1 if failure.", "def _bea_check_lustre_version(self, log, lustre_fallback_version): \"\"\" Check the Lustre version according to the installed", "if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return collectd_config def bea_generate_configs(self, log, barreleye_instance): \"\"\" Steps before configuring", "bea_influxdb_measurement_check(self, log, measurement_name, **tags): \"\"\" Check whether influxdb has datapoint \"\"\" if \"fqdn\"", "version [%s] on host [%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version = version return 0 def", "\"\"\" Check the Lustre version according to the installed RPMs \"\"\" # pylint:", "find the # problem as early as possible. 
barreleye_server = self.bea_barreleye_server command =", "agent host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_version(self, log): \"\"\"", "self.bea_enable_infiniband = enable_infiniband # Lustre version on this host. self.bea_lustre_version = None #", "if version is None: log.cl_error(\"failed to get the Collectd RPM version on host", "len(serie_values) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [values] is not a \"", "def bea_collectd_stop(self, log): \"\"\" Stop Collectd service. \"\"\" service_name = \"collectd\" host =", "target cpu on host [%s]\", self.bea_host.sh_hostname) return -1 if cpu_target != \"x86_64\": log.cl_error(\"host", "\"\"\" if \"fqdn\" not in tags: tags[\"fqdn\"] = self.bea_host.sh_hostname ret = utils.wait_condition(log, self._bea_influxdb_measurement_check,", "\"values\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [values] in one", "def bea_influxdb_measurement_check(self, log, measurement_name, **tags): \"\"\" Check whether influxdb has datapoint \"\"\" if", "restart Collectd service on host [%s]\", host.sh_hostname) return -1 log.cl_info(\"checking whether Influxdb can", "the host before installation \"\"\" ret = self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed to check", "skip_kernel=True, skip_test=True) if version is None: log.cl_warning(\"failed to match Lustre version according to", "= \"collectd\" ret = host.sh_service_restart(log, service_name) if ret: log.cl_error(\"failed to restart Collectd service", "[%s] on host [%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_running(self, log):", "\" \"of the series\", json_string) return -1 serie_values = serie[\"values\"] if len(serie_values) !=", "self.bea_barreleye_server command = (\"ping -c 1 %s\" % barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log, command)", "self.bea_host version = host.sh_rpm_version(log, \"collectd-\") if version is None: 
log.cl_error(\"failed to get the", "disk metrics from this agent. self.bea_enable_disk = enable_disk # Whether to collect Lustre", "is-active collectd\" retval = self.bea_host.sh_run(log, command) if retval.cr_stdout == \"active\\n\": return 1 if", "= influxdb_client.bic_query(log, query, epoch=\"s\") if response is None: log.cl_debug(\"failed to with query Influxdb", "log): \"\"\" Stop Collectd service. \"\"\" service_name = \"collectd\" host = self.bea_host ret", "def _bea_sanity_check(self, log): \"\"\" Sanity check of the host before installation \"\"\" ret", "host.sh_hostname) return -1 return 0 def bea_collectd_version(self, log): \"\"\" Return the Collectd version,", "test. Type: CollectdConfig self.bea_collectd_config_for_test = None # Collectd config for production. Type: CollectdConfig", "self.bea_host fpath = barreleye_instance.bei_workspace + \"/\" if test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config =", "return -1 # If the hostname is inconsistent with the configured hostname, #", "to check the Lustre version on Barreleye \" \"agent [%s]\", self.bea_host.sh_hostname) return -1", "= self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed to check the connection of Barreleye agent \"", "does not \" \"exist\", rpm_type, self.bea_host.sh_hostname) return -1 return 0 def _bea_influxdb_measurement_check(self, log,", "to restart Collectd service on host [%s]\", host.sh_hostname) return -1 log.cl_info(\"checking whether Influxdb", "InfluxDB data [%s], [results] is not a \" \"array with only one element\",", "skip_test=True) if version is None: log.cl_warning(\"failed to match Lustre version according to RPM", "to be updated. self.bea_influxdb_update_time = None # Collectd config for test. 
Type: CollectdConfig", "= (\"ping -c 1 %s\" % barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status:", "version according to RPM \" \"names on host [%s], using default [%s]\", self.bea_host.sh_hostname,", "is None: log.cl_error(\"failed to generate Collectd config for test\") return -1 self.bea_collectd_config_for_test =", "collectd_config = self.bea_collectd_config_for_test else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production fpath += \".\"", "access Barreyele server, find the # problem as early as possible. barreleye_server =", "and send the collectd.conf to the agent host \"\"\" host = self.bea_host fpath", "on host [%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version = version return 0 def _bea_generate_collectd_config(self, log,", "agent has an object of this type \"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self,", "\" \"agent [%s]\", self.bea_host.sh_hostname) return -1 collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if collectd_config", "columns\", json_string) return -1 timestamp = int(value[time_index]) if self.bea_influxdb_update_time is None: self.bea_influxdb_update_time =", "the connection of Barreleye agent \" \"[%s] with server\", self.bea_host.sh_hostname) return -1 distro", "in result: log.cl_debug(\"got wrong InfluxDB data [%s], no [series] in one \" \"of", "is not a \" \"array with only one element\", json_string) return -1 serie", "the Collectd is running. Return 1 if running. Return -1 if failure. \"\"\"", "= host # Whether to collect disk metrics from this agent. 
self.bea_enable_disk =", "import lustre_version from pycoral import ssh_host from pybarreleye import barrele_collectd class BarreleAgent(): \"\"\"", "on local host [%s] to \" \"directory [%s] on host [%s]\", fpath, etc_path,", "# Barreleye server with thye of BarreleServer self.bea_barreleye_server = barreleye_server # Host to", "if test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config", "config \"\"\" if collectd_test: interval = barrele_collectd.COLLECTD_INTERVAL_TEST else: interval = barreleye_instance.bei_collect_interval collectd_config =", "installed on host [%s], \" \"using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version", "columns = serie[\"columns\"] if \"values\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s],", "host [%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname) return -1 return 0 def bea_config_agent(self, log,", "import json from http import HTTPStatus from pycoral import utils from pycoral import", "-1 timestamp = int(value[time_index]) if self.bea_influxdb_update_time is None: self.bea_influxdb_update_time = timestamp elif timestamp", "else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production fpath += \".\" + host.sh_hostname collectd_config.cdc_dump(fpath)", "one element\", json_string) return -1 result = results[0] if \"series\" not in result:", "tags)) if ret: log.cl_error(\"Influxdb gets no data point for measurement [%s] \" \"from", "fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME collectd_config = self.bea_collectd_config_for_production fpath += \".\" + host.sh_hostname collectd_config.cdc_dump(fpath) etc_path", "\"columns\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [columns] in one", "agent. 
self.bea_enable_infiniband = enable_infiniband # Lustre version on this host. self.bea_lustre_version = None", "\"usage\") return -1 self.bea_collectd_config_for_production = collectd_config # Check that needed collectd RPMs are", "host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb doesn't have expected data points from", "Sanity check of the host before installation \"\"\" ret = self._bea_check_connection_with_server(log) if ret:", "wrong InfluxDB data [%s], [series] is not a \" \"array with only one", "barreleye_instance, collectd_test=False): \"\"\" Generate Collectd config \"\"\" if collectd_test: interval = barrele_collectd.COLLECTD_INTERVAL_TEST else:", "json_string) return -1 serie = series[0] if \"columns\" not in serie: log.cl_debug(\"got wrong", "-1 if cpu_target != \"x86_64\": log.cl_error(\"host [%s] has unsupported CPU type [%s]\", self.bea_host.sh_hostname,", "= self.bea_host.sh_target_cpu(log) if cpu_target is None: log.cl_error(\"failed to get target cpu on host", "log.cl_error(\"failed to check the connection of Barreleye agent \" \"[%s] with server\", self.bea_host.sh_hostname)", "ret: log.cl_error(\"failed to start [%s] service on agent host [%s]\", service_name, host.sh_hostname) return", "query [%s]\", response.status_code, query) return -1 data = response.json() json_string = json.dumps(data, indent=4,", "Lustre MDS metrics from this agent. self.bea_enable_lustre_mds = enable_lustre_mds # Whether to collect", "= retval.cr_stdout.strip() if hostname != self.bea_host.sh_hostname: log.cl_error(\"inconsistent hostname [%s] of Barreleye agent \"", "last timestamp when a measurement has been found to be updated. 
self.bea_influxdb_update_time =", "of the host before installation \"\"\" ret = self._bea_check_connection_with_server(log) if ret: log.cl_error(\"failed to", "service_name = \"collectd\" host = self.bea_host ret = host.sh_service_start(log, service_name) if ret: log.cl_error(\"failed", "Lustre plugin of Collectd\") return None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return collectd_config def bea_generate_configs(self,", "not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s] has unsupported distro [%s]\", self.bea_host.sh_hostname, distro) return", "barreleye_instance, test_config=False) if ret: log.cl_error(\"failed to send final Collectd config to Barreleye \"", "is not a \" \"array with only one element\", json_string) return -1 value", "self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if collectd_config is None: log.cl_error(\"failed to generate Collectd config for", "not installed on host [%s], \" \"using default [%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version =", "[%s], no [time] in \" \"the columns\", json_string) return -1 timestamp = int(value[time_index])", "!= \"\": tag_string += \" AND\" else: tag_string = \" WHERE\" tag_string +=", "service. 
\"\"\" service_name = \"collectd\" host = self.bea_host ret = host.sh_service_start(log, service_name) if", "-c 1 %s\" % barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to", "If the hostname is inconsistent with the configured hostname, # fqdn tag of", "%s\" % barreleye_server.bes_server_host.sh_hostname) retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run command", "log.cl_info(\"configuring Collectd on host [%s]\", host.sh_hostname) ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=True) if ret:", "[%s]\", self.bea_host.sh_hostname, lustre_fallback_version.lv_name) self.bea_lustre_version = lustre_fallback_version return 0 if retval.cr_exit_status: log.cl_error(\"failed to run", "configuring Barreleye agent \"\"\" ret = self._bea_sanity_check(log) if ret: log.cl_error(\"Barreleye agent host [%s]", "wrong InfluxDB data [%s], no [results]\", json_string) return -1 results = data[\"results\"] if", "log.cl_info(\"checking whether Influxdb can get data points from \" \"agent [%s]\", host.sh_hostname) ret", "Configure agent \"\"\" host = self.bea_host log.cl_info(\"configuring Collectd on host [%s]\", host.sh_hostname) ret", "InfluxDB data [%s], no [results]\", json_string) return -1 results = data[\"results\"] if len(results)", "self.bea_enable_lustre_client): ret = collectd_config.cdc_plugin_lustre(log, self.bea_lustre_version, enable_lustre_oss=self.bea_enable_lustre_oss, enable_lustre_mds=self.bea_enable_lustre_mds, enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed", "return -1 rpm_names = retval.cr_stdout.split() rpm_fnames = [] for rpm_name in rpm_names: rpm_fnames.append(rpm_name", "log.cl_debug(\"got wrong InfluxDB data [%s], [series] is 
not a \" \"array with only", "Generate Collectd config \"\"\" if collectd_test: interval = barrele_collectd.COLLECTD_INTERVAL_TEST else: interval = barreleye_instance.bei_collect_interval", "[%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_version(self, log): \"\"\" Return the", "version on Barreleye \" \"agent [%s]\", self.bea_host.sh_hostname) return -1 collectd_config = self._bea_generate_collectd_config(log, barreleye_instance,", "by InfluxDB \"\"\" tag_string = \"\" for key, value in tags.items(): if tag_string", "Lustre version [%s] on host [%s]\", version.lv_name, self.bea_host.sh_hostname) self.bea_lustre_version = version return 0", "json_string) return -1 result = results[0] if \"series\" not in result: log.cl_debug(\"got wrong", "= \"/etc/collectd.conf\" ret = host.sh_send_file(log, fpath, etc_path) if ret: log.cl_error(\"failed to send file", "thye of BarreleServer self.bea_barreleye_server = barreleye_server # Host to run commands. self.bea_host =", "this host. self.bea_lustre_version = None # Collectd RPMs needed to be installed in", "5.12.0.barreleye0-1.el7.x86_64 \"\"\" host = self.bea_host version = host.sh_rpm_version(log, \"collectd-\") if version is None:", "\"array with only one element\", json_string) return -1 value = serie_values[0] time_index =", "be updated. self.bea_influxdb_update_time = None # Collectd config for test. 
Type: CollectdConfig self.bea_collectd_config_for_test", "\"agent [%s]\", self.bea_host.sh_hostname) return -1 collectd_config = self._bea_generate_collectd_config(log, barreleye_instance, collectd_test=True) if collectd_config is", "barreleye_instance.bei_collect_interval collectd_config = \\ barrele_collectd.CollectdConfig(self, interval, barreleye_instance.bei_jobstat_pattern) if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or self.bea_enable_lustre_client):", "influxdb has datapoint \"\"\" if \"fqdn\" not in tags: tags[\"fqdn\"] = self.bea_host.sh_hostname ret", "log.cl_debug(\"got wrong InfluxDB data [%s], [values] is not a \" \"array with only", "wrong InfluxDB data [%s], no [columns] in one \" \"of the series\", json_string)", "log.cl_error(\"Influxdb gets no data point for measurement [%s] \" \"from agent [%s]\", measurement_name,", "enable_disk # Whether to collect Lustre OSS metrics from this agent. self.bea_enable_lustre_oss =", "kernel RPMs. command = (\"rpm -qa | grep lustre | grep -v kernel\")", "barreleye_server = self.bea_barreleye_server command = (\"ping -c 1 %s\" % barreleye_server.bes_server_host.sh_hostname) retval =", "Return -1 if failure. 
\"\"\" command = \"systemctl is-active collectd\" retval = self.bea_host.sh_run(log,", "\"inactive\\n\": return 0 log.cl_error(\"unexpected stdout of command [%s] on host [%s], \" \"ret", "if ret: log.cl_error(\"failed to stop [%s] service on agent host [%s]\", service_name, host.sh_hostname)", "import ssh_host from pybarreleye import barrele_collectd class BarreleAgent(): \"\"\" Each agent has an", "service_name, host.sh_hostname) return -1 return 0 def bea_collectd_start(self, log): \"\"\" Start Collectd service.", "+= 1 if time_index == -1: log.cl_debug(\"got wrong InfluxDB data [%s], no [time]", "None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband() return collectd_config def bea_generate_configs(self, log, barreleye_instance): \"\"\" Steps before", "not in tags: tags[\"fqdn\"] = self.bea_host.sh_hostname ret = utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags)) if", "\"\"\" Check whether the Collectd is running. Return 1 if running. Return -1", "the agent host \"\"\" host = self.bea_host fpath = barreleye_instance.bei_workspace + \"/\" if", "self.bea_collectd_send_config(log, barreleye_instance, test_config=False) if ret: log.cl_error(\"failed to send final Collectd config to Barreleye", "of Barreleye agent \" \"host [%s]\", hostname, self.bea_host.sh_hostname) return -1 return 0 def", "self._bea_influxdb_measurement_check, (measurement_name, tags)) if ret: log.cl_error(\"Influxdb gets no data point for measurement [%s]", "\"\"\" Start Collectd service. 
\"\"\" service_name = \"collectd\" host = self.bea_host ret =", "self.bea_collectd_config_for_production fpath += \".\" + host.sh_hostname collectd_config.cdc_dump(fpath) etc_path = \"/etc/collectd.conf\" ret = host.sh_send_file(log,", "host.sh_hostname) return -1 ret = self.bea_collectd_send_config(log, barreleye_instance, test_config=False) if ret: log.cl_error(\"failed to send", "service_name) if ret: log.cl_error(\"failed to restart Barreleye agent on host [%s]\", host.sh_hostname) return", "log.cl_error(\"failed to get the Collectd RPM version on host [%s]\", host.sh_hostname) return version", "command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 def bea_collectd_stop(self, log): \"\"\" Stop Collectd", "\"[%s] with server\", self.bea_host.sh_hostname) return -1 distro = self.bea_host.sh_distro(log) if distro not in", "whether Influxdb can get data points from \" \"agent [%s]\", host.sh_hostname) ret =", "barrele_collectd class BarreleAgent(): \"\"\" Each agent has an object of this type \"\"\"", "retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 return 0 def _bea_sanity_check(self, log): \"\"\" Sanity check", "retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run command [%s] on host", "command [%s] on host [%s], \" \"ret = [%d], stdout = [%s], stderr", "== \"active\\n\": return 1 if retval.cr_stdout == \"unknown\\n\": return 0 if retval.cr_stdout ==", "[%s]\", service_name, host.sh_hostname) return -1 return 0 def bea_collectd_running(self, log): \"\"\" Check whether", "in tags: tags[\"fqdn\"] = self.bea_host.sh_hostname ret = utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags)) if ret:", "Library for Barreleye agent. Barreleye is a performance monitoring system for Lustre. 
\"\"\"", "results = data[\"results\"] if len(results) != 1: log.cl_debug(\"got wrong InfluxDB data [%s], [results]", "if tag_string != \"\": tag_string += \" AND\" else: tag_string = \" WHERE\"", "if cpu_target is None: log.cl_error(\"failed to get target cpu on host [%s]\", self.bea_host.sh_hostname)", "series[0] if \"columns\" not in serie: log.cl_debug(\"got wrong InfluxDB data [%s], no [columns]", "ret: log.cl_error(\"failed to check the connection of Barreleye agent \" \"[%s] with server\",", "Whether to collect Lustre OSS metrics from this agent. self.bea_enable_lustre_oss = enable_lustre_oss #", "-1 return 0 def bea_collectd_version(self, log): \"\"\" Return the Collectd version, e.g. 5.12.0.barreleye0-1.el7.x86_64", "tags[\"fqdn\"] = self.bea_host.sh_hostname ret = utils.wait_condition(log, self._bea_influxdb_measurement_check, (measurement_name, tags)) if ret: log.cl_error(\"Influxdb gets", "collectd_test=False) if collectd_config is None: log.cl_error(\"failed to generate Collectd config for production \"", "Collectd config to Barreleye \" \"agent on host [%s]\", host.sh_hostname) return -1 ret", "service_name) if ret: log.cl_error(\"failed to stop [%s] service on agent host [%s]\", service_name,", "# pylint: disable=too-many-return-statements,too-many-branches # Old Lustre kernel RPM might not be uninstalled ye,", "-1 serie_values = serie[\"values\"] if len(serie_values) != 1: log.cl_debug(\"got wrong InfluxDB data [%s],", "with thye of BarreleServer self.bea_barreleye_server = barreleye_server # Host to run commands. 
self.bea_host", "[%s], stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 def bea_collectd_stop(self,", "an object of this type \"\"\" # pylint: disable=too-few-public-methods,too-many-instance-attributes def __init__(self, host, barreleye_server,", "AND\" else: tag_string = \" WHERE\" tag_string += (\" %s = '%s'\" %", "self.bea_influxdb_update_time is None: self.bea_influxdb_update_time = timestamp elif timestamp > self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp", "from pycoral import lustre_version from pycoral import ssh_host from pybarreleye import barrele_collectd class", "query [%s]\", query) return -1 if response.status_code != HTTPStatus.OK: log.cl_debug(\"got InfluxDB status [%d]", "configured hostname, # fqdn tag of the data points will be unexpected. hostname", "disable=bare-except,too-many-return-statements # pylint: disable=too-many-locals,too-many-branches,too-many-statements \"\"\" Check whether the datapoint is recieved by InfluxDB", "stderr = [%s]\", command, self.bea_host.sh_hostname, retval.cr_exit_status, retval.cr_stdout, retval.cr_stderr) return -1 rpm_names = retval.cr_stdout.split()", "return -1 return 0 def bea_collectd_running(self, log): \"\"\" Check whether the Collectd is", "ret: log.cl_error(\"failed to send final Collectd config to Barreleye \" \"agent on host", "generate Collectd config for test\") return -1 self.bea_collectd_config_for_test = collectd_config collectd_config = self._bea_generate_collectd_config(log,", "kernel\") retval = self.bea_host.sh_run(log, command) if (retval.cr_exit_status == 1 and retval.cr_stdout == \"\"", "rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict: log.cl_error(\"needed Collectd RPM [%s] of agent [%s] does not", "query [%s]\", timestamp, query) return -1 def bea_influxdb_measurement_check(self, log, measurement_name, **tags): \"\"\" Check", "self.bea_enable_infiniband: 
collectd_config.cdc_plugin_infiniband() return collectd_config def bea_generate_configs(self, log, barreleye_instance): \"\"\" Steps before configuring Barreleye", "test_config=True) if ret: log.cl_error(\"failed to send test config to Barreleye agent \" \"on", "query) return -1 def bea_influxdb_measurement_check(self, log, measurement_name, **tags): \"\"\" Check whether influxdb has", "in this agent. self.bea_needed_collectd_rpm_types = \\ [barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME, barrele_collectd.COLLECTD_TYPE_NAME] # The last timestamp when", "\" \"agent [%s]\", host.sh_hostname) ret = self.bea_collectd_config_for_test.cdc_check(log) if ret: log.cl_error(\"Influxdb doesn't have expected", "= self.bea_host.sh_run(log, command) if (retval.cr_exit_status == 1 and retval.cr_stdout == \"\" and retval.cr_stderr", "Infiniband metrics from this agent. self.bea_enable_infiniband = enable_infiniband # Lustre version on this", "ret: log.cl_error(\"failed to config Lustre plugin of Collectd\") return None if self.bea_enable_infiniband: collectd_config.cdc_plugin_infiniband()", "timestamp elif timestamp > self.bea_influxdb_update_time: return 0 log.cl_debug(\"timestamp [%d] is not updated with", "not a \" \"array with only one element\", json_string) return -1 result =", "json_string) if \"results\" not in data: log.cl_debug(\"got wrong InfluxDB data [%s], no [results]\",", "\"\"\" Check whether the datapoint is recieved by InfluxDB \"\"\" tag_string = \"\"", "collect Lustre client metrics from this agent. self.bea_enable_lustre_client = enable_lustre_client # Whether to", "to match Lustre version according to RPM \" \"names on host [%s], using", "\" \"of the series\", json_string) return -1 columns = serie[\"columns\"] if \"values\" not", "is a performance monitoring system for Lustre. 
\"\"\" import json from http import", "1: log.cl_debug(\"got wrong InfluxDB data [%s], [values] is not a \" \"array with", "= self.bea_host fpath = barreleye_instance.bei_workspace + \"/\" if test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config", "\"%s\"%s ORDER BY time DESC LIMIT 1;' % (measurement_name, tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client", "key, value in tags.items(): if tag_string != \"\": tag_string += \" AND\" else:", "enable_lustre_client=self.bea_enable_lustre_client, enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost, enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt) if ret: log.cl_error(\"failed to config Lustre plugin of Collectd\") return", "-1 return 0 def bea_config_agent(self, log, barreleye_instance): \"\"\" Configure agent \"\"\" host =", "not updated with query [%s]\", timestamp, query) return -1 def bea_influxdb_measurement_check(self, log, measurement_name,", "service_name, host.sh_hostname) return -1 return 0 def bea_collectd_version(self, log): \"\"\" Return the Collectd", "\"/\" if test_config: fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME collectd_config = self.bea_collectd_config_for_test else: fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME", "the Lustre version according to the installed RPMs \"\"\" # pylint: disable=too-many-return-statements,too-many-branches #", "Lustre OSS metrics from this agent. self.bea_enable_lustre_oss = enable_lustre_oss # Whether to collect", "is recieved by InfluxDB \"\"\" tag_string = \"\" for key, value in tags.items():", "json.dumps(data, indent=4, separators=(',', ': ')) log.cl_debug(\"data: [%s]\", json_string) if \"results\" not in data:", "[time] in \" \"the columns\", json_string) return -1 timestamp = int(value[time_index]) if self.bea_influxdb_update_time", "# Whether to collect Lustre client metrics from this agent. 
self.bea_enable_lustre_client = enable_lustre_client", "the result\", json_string) return -1 series = result[\"series\"] if len(series) != 1: log.cl_debug(\"got", "self.bea_collectd_config_for_production = collectd_config # Check that needed collectd RPMs are installed for rpm_type", "rpm_fnames = [] for rpm_name in rpm_names: rpm_fnames.append(rpm_name + \".rpm\") version, _ =", "1;' % (measurement_name, tag_string)) influxdb_client = self.bea_barreleye_server.bes_influxdb_client response = influxdb_client.bic_query(log, query, epoch=\"s\") if", "ret: log.cl_error(\"Barreleye agent host [%s] is insane\", self.bea_host.sh_hostname) return -1 ret = self._bea_check_lustre_version(log,", "Collectd RPM [%s] of agent [%s] does not \" \"exist\", rpm_type, self.bea_host.sh_hostname) return", "= (\"hostname\") retval = self.bea_host.sh_run(log, command) if retval.cr_exit_status: log.cl_error(\"failed to run command [%s]", "self.bea_enable_disk = enable_disk # Whether to collect Lustre OSS metrics from this agent.", "if distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]: log.cl_error(\"host [%s] has unsupported distro [%s]\", self.bea_host.sh_hostname,", "log.cl_debug(\"got wrong InfluxDB data [%s], no [time] in \" \"the columns\", json_string) return", "query Influxdb with query [%s]\", query) return -1 if response.status_code != HTTPStatus.OK: log.cl_debug(\"got", "running. Return -1 if failure. 
\"\"\" command = \"systemctl is-active collectd\" retval =", "log.cl_error(\"failed to generate Collectd config for production \" \"usage\") return -1 self.bea_collectd_config_for_production =", "command = \"systemctl is-active collectd\" retval = self.bea_host.sh_run(log, command) if retval.cr_stdout == \"active\\n\":", "response = influxdb_client.bic_query(log, query, epoch=\"s\") if response is None: log.cl_debug(\"failed to with query", "host [%s] to \" \"directory [%s] on host [%s]\", fpath, etc_path, barreleye_instance.bei_local_host.sh_hostname, host.sh_hostname)" ]
[ "the scraper, you don't need to run this script. # $1 is a", "into etcd. # If you've never run the scraper, you don't need to", "'/todoist/%s' % sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1', port=2379) # Copy across our auth details", "% etcd_path, f.read()) # Dump the finished state def dumpdir(path): dir = etcd_client.get(path)", "the tripit user authentication details # in etcd. import json import os import", "user authentication details # in etcd. import json import os import sys import", "# Dump the finished state def dumpdir(path): dir = etcd_client.get(path) for result in", "run the scraper, you don't need to run this script. # $1 is", "tripit user authentication details # in etcd. import json import os import sys", "identifies the tripit user authentication details # in etcd. import json import os", "don't need to run this script. # $1 is a job tag, which", "which identifies the tripit user authentication details # in etcd. import json import", "import sys import etcd etcd_path = '/todoist/%s' % sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1', port=2379)", "= etcd.Client(host='192.168.50.1', port=2379) # Copy across our auth details with open(os.path.expanduser('~/.todoist')) as f:", "auth details with open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth' % etcd_path, f.read()) # Dump the", "= '/todoist/%s' % sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1', port=2379) # Copy across our auth", "old tripit auth details into etcd. # If you've never run the scraper,", "scraper, you don't need to run this script. # $1 is a job", "run this script. # $1 is a job tag, which identifies the tripit", "authentication details # in etcd. import json import os import sys import etcd", "etcd etcd_path = '/todoist/%s' % sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1', port=2379) # Copy across", "script which copies old tripit auth details into etcd. # If you've never", "you don't need to run this script. 
# $1 is a job tag,", "for result in dir.children: if result.dir: dumpdir(result.key) else: print('%s: %s' %(result.key, result.value)) dumpdir(etcd_path)", "# $1 is a job tag, which identifies the tripit user authentication details", "tripit auth details into etcd. # If you've never run the scraper, you", "import etcd etcd_path = '/todoist/%s' % sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1', port=2379) # Copy", "open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth' % etcd_path, f.read()) # Dump the finished state def", "etcd_client = etcd.Client(host='192.168.50.1', port=2379) # Copy across our auth details with open(os.path.expanduser('~/.todoist')) as", "is a job tag, which identifies the tripit user authentication details # in", "auth details into etcd. # If you've never run the scraper, you don't", "sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1', port=2379) # Copy across our auth details with open(os.path.expanduser('~/.todoist'))", "etcd.Client(host='192.168.50.1', port=2379) # Copy across our auth details with open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth'", "port=2379) # Copy across our auth details with open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth' %", "to run this script. # $1 is a job tag, which identifies the", "a job tag, which identifies the tripit user authentication details # in etcd.", "etcd_client.write('%s/auth' % etcd_path, f.read()) # Dump the finished state def dumpdir(path): dir =", "our auth details with open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth' % etcd_path, f.read()) # Dump", "job tag, which identifies the tripit user authentication details # in etcd. 
import", "# This is a simple script which copies old tripit auth details into", "os import sys import etcd etcd_path = '/todoist/%s' % sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1',", "Dump the finished state def dumpdir(path): dir = etcd_client.get(path) for result in dir.children:", "import os import sys import etcd etcd_path = '/todoist/%s' % sys.argv[1] etcd_client =", "across our auth details with open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth' % etcd_path, f.read()) #", "details # in etcd. import json import os import sys import etcd etcd_path", "details with open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth' % etcd_path, f.read()) # Dump the finished", "def dumpdir(path): dir = etcd_client.get(path) for result in dir.children: if result.dir: dumpdir(result.key) else:", "f.read()) # Dump the finished state def dumpdir(path): dir = etcd_client.get(path) for result", "If you've never run the scraper, you don't need to run this script.", "in etcd. import json import os import sys import etcd etcd_path = '/todoist/%s'", "# Copy across our auth details with open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth' % etcd_path,", "as f: etcd_client.write('%s/auth' % etcd_path, f.read()) # Dump the finished state def dumpdir(path):", "is a simple script which copies old tripit auth details into etcd. #", "need to run this script. # $1 is a job tag, which identifies", "sys import etcd etcd_path = '/todoist/%s' % sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1', port=2379) #", "never run the scraper, you don't need to run this script. # $1", "etcd. 
import json import os import sys import etcd etcd_path = '/todoist/%s' %", "f: etcd_client.write('%s/auth' % etcd_path, f.read()) # Dump the finished state def dumpdir(path): dir", "state def dumpdir(path): dir = etcd_client.get(path) for result in dir.children: if result.dir: dumpdir(result.key)", "% sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1', port=2379) # Copy across our auth details with", "dumpdir(path): dir = etcd_client.get(path) for result in dir.children: if result.dir: dumpdir(result.key) else: print('%s:", "a simple script which copies old tripit auth details into etcd. # If", "Copy across our auth details with open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth' % etcd_path, f.read())", "you've never run the scraper, you don't need to run this script. #", "etcd_client.get(path) for result in dir.children: if result.dir: dumpdir(result.key) else: print('%s: %s' %(result.key, result.value))", "json import os import sys import etcd etcd_path = '/todoist/%s' % sys.argv[1] etcd_client", "import json import os import sys import etcd etcd_path = '/todoist/%s' % sys.argv[1]", "details into etcd. # If you've never run the scraper, you don't need", "script. # $1 is a job tag, which identifies the tripit user authentication", "$1 is a job tag, which identifies the tripit user authentication details #", "# If you've never run the scraper, you don't need to run this", "tag, which identifies the tripit user authentication details # in etcd. 
import json", "the finished state def dumpdir(path): dir = etcd_client.get(path) for result in dir.children: if", "= etcd_client.get(path) for result in dir.children: if result.dir: dumpdir(result.key) else: print('%s: %s' %(result.key,", "dir = etcd_client.get(path) for result in dir.children: if result.dir: dumpdir(result.key) else: print('%s: %s'", "finished state def dumpdir(path): dir = etcd_client.get(path) for result in dir.children: if result.dir:", "etcd_path = '/todoist/%s' % sys.argv[1] etcd_client = etcd.Client(host='192.168.50.1', port=2379) # Copy across our", "This is a simple script which copies old tripit auth details into etcd.", "simple script which copies old tripit auth details into etcd. # If you've", "etcd_path, f.read()) # Dump the finished state def dumpdir(path): dir = etcd_client.get(path) for", "copies old tripit auth details into etcd. # If you've never run the", "# in etcd. import json import os import sys import etcd etcd_path =", "this script. # $1 is a job tag, which identifies the tripit user", "which copies old tripit auth details into etcd. # If you've never run", "#!/usr/bin/python2.7 # This is a simple script which copies old tripit auth details", "with open(os.path.expanduser('~/.todoist')) as f: etcd_client.write('%s/auth' % etcd_path, f.read()) # Dump the finished state", "etcd. # If you've never run the scraper, you don't need to run" ]
[ "print(\"Observation space shape: {}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array')) for ep in range(100): print \"ep: \",", "{}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array')) for ep in range(100): print \"ep: \", ep state =", "gym import numpy as np from matplotlib import pyplot as plt env =", "\", ep state = env.reset() while True: action = np.random.choice(np.arange(env.action_space.n)) _, _, done,", "space size: {}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation = env.reset() print(\"Observation space shape: {}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array'))", "<reponame>viv92/wildML_RLimplementations import gym import numpy as np from matplotlib import pyplot as plt", "matplotlib import pyplot as plt env = gym.envs.make(\"Breakout-v0\") print(\"Action space size: {}\".format(env.action_space.n)) #print(env.get_action_meanings())", "env.reset() while True: action = np.random.choice(np.arange(env.action_space.n)) _, _, done, _ = env.step(action) #plt.figure()", "import gym import numpy as np from matplotlib import pyplot as plt env", "space shape: {}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array')) for ep in range(100): print \"ep: \", ep", "= env.reset() while True: action = np.random.choice(np.arange(env.action_space.n)) _, _, done, _ = env.step(action)", "size: {}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation = env.reset() print(\"Observation space shape: {}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array')) for", "np from matplotlib import pyplot as plt env = gym.envs.make(\"Breakout-v0\") print(\"Action space size:", "print(\"Action space size: {}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation = env.reset() print(\"Observation space shape: {}\".format(observation.shape)) plt.figure()", "from 
matplotlib import pyplot as plt env = gym.envs.make(\"Breakout-v0\") print(\"Action space size: {}\".format(env.action_space.n))", "plt.imshow(env.render(mode='rgb_array')) for ep in range(100): print \"ep: \", ep state = env.reset() while", "state = env.reset() while True: action = np.random.choice(np.arange(env.action_space.n)) _, _, done, _ =", "as np from matplotlib import pyplot as plt env = gym.envs.make(\"Breakout-v0\") print(\"Action space", "observation = env.reset() print(\"Observation space shape: {}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array')) for ep in range(100):", "= env.reset() print(\"Observation space shape: {}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array')) for ep in range(100): print", "plt.figure() plt.imshow(env.render(mode='rgb_array')) for ep in range(100): print \"ep: \", ep state = env.reset()", "env.reset() print(\"Observation space shape: {}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array')) for ep in range(100): print \"ep:", "print \"ep: \", ep state = env.reset() while True: action = np.random.choice(np.arange(env.action_space.n)) _,", "np.random.choice(np.arange(env.action_space.n)) _, _, done, _ = env.step(action) #plt.figure() plt.imshow(env.render(mode='rgb_array')) if done: break env.render(close=True)", "\"ep: \", ep state = env.reset() while True: action = np.random.choice(np.arange(env.action_space.n)) _, _,", "for ep in range(100): print \"ep: \", ep state = env.reset() while True:", "ep in range(100): print \"ep: \", ep state = env.reset() while True: action", "= np.random.choice(np.arange(env.action_space.n)) _, _, done, _ = env.step(action) #plt.figure() plt.imshow(env.render(mode='rgb_array')) if done: break", "action = np.random.choice(np.arange(env.action_space.n)) _, _, done, _ = env.step(action) #plt.figure() plt.imshow(env.render(mode='rgb_array')) if done:", "shape: {}\".format(observation.shape)) plt.figure() 
plt.imshow(env.render(mode='rgb_array')) for ep in range(100): print \"ep: \", ep state", "numpy as np from matplotlib import pyplot as plt env = gym.envs.make(\"Breakout-v0\") print(\"Action", "plt env = gym.envs.make(\"Breakout-v0\") print(\"Action space size: {}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation = env.reset() print(\"Observation", "import numpy as np from matplotlib import pyplot as plt env = gym.envs.make(\"Breakout-v0\")", "{}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation = env.reset() print(\"Observation space shape: {}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array')) for ep", "import pyplot as plt env = gym.envs.make(\"Breakout-v0\") print(\"Action space size: {}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation", "pyplot as plt env = gym.envs.make(\"Breakout-v0\") print(\"Action space size: {}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation =", "in range(100): print \"ep: \", ep state = env.reset() while True: action =", "= gym.envs.make(\"Breakout-v0\") print(\"Action space size: {}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation = env.reset() print(\"Observation space shape:", "#print(env.get_action_meanings()) observation = env.reset() print(\"Observation space shape: {}\".format(observation.shape)) plt.figure() plt.imshow(env.render(mode='rgb_array')) for ep in", "as plt env = gym.envs.make(\"Breakout-v0\") print(\"Action space size: {}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation = env.reset()", "gym.envs.make(\"Breakout-v0\") print(\"Action space size: {}\".format(env.action_space.n)) #print(env.get_action_meanings()) observation = env.reset() print(\"Observation space shape: {}\".format(observation.shape))", "env = gym.envs.make(\"Breakout-v0\") print(\"Action space size: {}\".format(env.action_space.n)) 
#print(env.get_action_meanings()) observation = env.reset() print(\"Observation space", "ep state = env.reset() while True: action = np.random.choice(np.arange(env.action_space.n)) _, _, done, _", "while True: action = np.random.choice(np.arange(env.action_space.n)) _, _, done, _ = env.step(action) #plt.figure() plt.imshow(env.render(mode='rgb_array'))", "range(100): print \"ep: \", ep state = env.reset() while True: action = np.random.choice(np.arange(env.action_space.n))", "True: action = np.random.choice(np.arange(env.action_space.n)) _, _, done, _ = env.step(action) #plt.figure() plt.imshow(env.render(mode='rgb_array')) if" ]
[ "self).__init__() self.conv1 = Conv2D(32, 3, padding='same', activation='relu') self.conv2 = Conv2D(64, 3, padding='same', activation='relu')", "self.d2(x) return x def model(self): x = keras.Input(shape=(15, 15, 1)) return Model(inputs=[x], outputs=self.call(x))", "tensorflow as tf from tensorflow import keras from tensorflow.keras.layers import Dense, Conv2D, Flatten,", "= self.dropout2(x) x = self.d2(x) return x def model(self): x = keras.Input(shape=(15, 15,", "activation='relu') self.conv2 = Conv2D(64, 3, padding='same', activation='relu') self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same') self.dropout1", "Model class CNN(Model): def __init__(self): super(CNN, self).__init__() self.conv1 = Conv2D(32, 3, padding='same', activation='relu')", "padding='same', activation='relu') self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same') self.dropout1 = Dropout(0.25) self.flatten = Flatten()", "def __init__(self): super(CNN, self).__init__() self.conv1 = Conv2D(32, 3, padding='same', activation='relu') self.conv2 = Conv2D(64,", "MaxPooling2D from tensorflow.keras import Model class CNN(Model): def __init__(self): super(CNN, self).__init__() self.conv1 =", "self.pool1(x) x = self.dropout1(x) x = self.flatten(x) x = self.d1(x) x = self.dropout2(x)", "= Conv2D(32, 3, padding='same', activation='relu') self.conv2 = Conv2D(64, 3, padding='same', activation='relu') self.pool1 =", "self.d1 = Dense(128, activation='relu') self.dropout2 = Dropout(0.5) self.d2 = tf.keras.layers.Dense(3, activation='softmax') def call(self,", "x = self.conv2(x) x = self.pool1(x) x = self.dropout1(x) x = self.flatten(x) x", "import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras import Model class CNN(Model): def", "Flatten, Dropout, MaxPooling2D from tensorflow.keras import Model class CNN(Model): def __init__(self): super(CNN, self).__init__()", "class CNN(Model): def __init__(self): super(CNN, self).__init__() self.conv1 = Conv2D(32, 3, padding='same', 
activation='relu') self.conv2", "Dropout(0.25) self.flatten = Flatten() self.d1 = Dense(128, activation='relu') self.dropout2 = Dropout(0.5) self.d2 =", "self.dropout1 = Dropout(0.25) self.flatten = Flatten() self.d1 = Dense(128, activation='relu') self.dropout2 = Dropout(0.5)", "3, padding='same', activation='relu') self.conv2 = Conv2D(64, 3, padding='same', activation='relu') self.pool1 = MaxPooling2D(pool_size=(2, 2),", "= Dense(128, activation='relu') self.dropout2 = Dropout(0.5) self.d2 = tf.keras.layers.Dense(3, activation='softmax') def call(self, x):", "Dense(128, activation='relu') self.dropout2 = Dropout(0.5) self.d2 = tf.keras.layers.Dense(3, activation='softmax') def call(self, x): x", "activation='softmax') def call(self, x): x = self.conv1(x) x = self.conv2(x) x = self.pool1(x)", "activation='relu') self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same') self.dropout1 = Dropout(0.25) self.flatten = Flatten() self.d1", "Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras import Model class CNN(Model): def __init__(self):", "x = self.d1(x) x = self.dropout2(x) x = self.d2(x) return x def model(self):", "self.dropout2(x) x = self.d2(x) return x def model(self): x = keras.Input(shape=(15, 15, 1))", "self.conv2 = Conv2D(64, 3, padding='same', activation='relu') self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same') self.dropout1 =", "self.dropout2 = Dropout(0.5) self.d2 = tf.keras.layers.Dense(3, activation='softmax') def call(self, x): x = self.conv1(x)", "tf from tensorflow import keras from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D", "= self.d1(x) x = self.dropout2(x) x = self.d2(x) return x def model(self): x", "tensorflow import keras from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras", "x): x = self.conv1(x) x = self.conv2(x) x = self.pool1(x) x = self.dropout1(x)", "import keras from tensorflow.keras.layers import Dense, Conv2D, Flatten, 
Dropout, MaxPooling2D from tensorflow.keras import", "= self.d2(x) return x def model(self): x = keras.Input(shape=(15, 15, 1)) return Model(inputs=[x],", "= Dropout(0.5) self.d2 = tf.keras.layers.Dense(3, activation='softmax') def call(self, x): x = self.conv1(x) x", "Conv2D(32, 3, padding='same', activation='relu') self.conv2 = Conv2D(64, 3, padding='same', activation='relu') self.pool1 = MaxPooling2D(pool_size=(2,", "Conv2D(64, 3, padding='same', activation='relu') self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same') self.dropout1 = Dropout(0.25) self.flatten", "x = self.d2(x) return x def model(self): x = keras.Input(shape=(15, 15, 1)) return", "self.flatten(x) x = self.d1(x) x = self.dropout2(x) x = self.d2(x) return x def", "tf.keras.layers.Dense(3, activation='softmax') def call(self, x): x = self.conv1(x) x = self.conv2(x) x =", "x = self.conv1(x) x = self.conv2(x) x = self.pool1(x) x = self.dropout1(x) x", "x = self.dropout1(x) x = self.flatten(x) x = self.d1(x) x = self.dropout2(x) x", "Dropout, MaxPooling2D from tensorflow.keras import Model class CNN(Model): def __init__(self): super(CNN, self).__init__() self.conv1", "x = self.pool1(x) x = self.dropout1(x) x = self.flatten(x) x = self.d1(x) x", "self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same') self.dropout1 = Dropout(0.25) self.flatten = Flatten() self.d1 =", "= tf.keras.layers.Dense(3, activation='softmax') def call(self, x): x = self.conv1(x) x = self.conv2(x) x", "activation='relu') self.dropout2 = Dropout(0.5) self.d2 = tf.keras.layers.Dense(3, activation='softmax') def call(self, x): x =", "self.flatten = Flatten() self.d1 = Dense(128, activation='relu') self.dropout2 = Dropout(0.5) self.d2 = tf.keras.layers.Dense(3,", "call(self, x): x = self.conv1(x) x = self.conv2(x) x = self.pool1(x) x =", "super(CNN, self).__init__() self.conv1 = Conv2D(32, 3, padding='same', activation='relu') self.conv2 = Conv2D(64, 3, padding='same',", "MaxPooling2D(pool_size=(2, 2), padding='same') 
self.dropout1 = Dropout(0.25) self.flatten = Flatten() self.d1 = Dense(128, activation='relu')", "self.conv1 = Conv2D(32, 3, padding='same', activation='relu') self.conv2 = Conv2D(64, 3, padding='same', activation='relu') self.pool1", "Flatten() self.d1 = Dense(128, activation='relu') self.dropout2 = Dropout(0.5) self.d2 = tf.keras.layers.Dense(3, activation='softmax') def", "as tf from tensorflow import keras from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout,", "padding='same') self.dropout1 = Dropout(0.25) self.flatten = Flatten() self.d1 = Dense(128, activation='relu') self.dropout2 =", "tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras import Model class CNN(Model):", "3, padding='same', activation='relu') self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same') self.dropout1 = Dropout(0.25) self.flatten =", "CNN(Model): def __init__(self): super(CNN, self).__init__() self.conv1 = Conv2D(32, 3, padding='same', activation='relu') self.conv2 =", "= MaxPooling2D(pool_size=(2, 2), padding='same') self.dropout1 = Dropout(0.25) self.flatten = Flatten() self.d1 = Dense(128,", "= Dropout(0.25) self.flatten = Flatten() self.d1 = Dense(128, activation='relu') self.dropout2 = Dropout(0.5) self.d2", "__init__(self): super(CNN, self).__init__() self.conv1 = Conv2D(32, 3, padding='same', activation='relu') self.conv2 = Conv2D(64, 3,", "= self.conv1(x) x = self.conv2(x) x = self.pool1(x) x = self.dropout1(x) x =", "def call(self, x): x = self.conv1(x) x = self.conv2(x) x = self.pool1(x) x", "self.d1(x) x = self.dropout2(x) x = self.d2(x) return x def model(self): x =", "from tensorflow.keras import Model class CNN(Model): def __init__(self): super(CNN, self).__init__() self.conv1 = Conv2D(32,", "Dropout(0.5) self.d2 = tf.keras.layers.Dense(3, activation='softmax') def call(self, x): x = self.conv1(x) x =", "self.d2 = tf.keras.layers.Dense(3, activation='softmax') def call(self, x): x = self.conv1(x) x = 
self.conv2(x)", "tensorflow.keras import Model class CNN(Model): def __init__(self): super(CNN, self).__init__() self.conv1 = Conv2D(32, 3,", "Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras import Model class CNN(Model): def __init__(self): super(CNN,", "from tensorflow import keras from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from", "= Flatten() self.d1 = Dense(128, activation='relu') self.dropout2 = Dropout(0.5) self.d2 = tf.keras.layers.Dense(3, activation='softmax')", "= self.conv2(x) x = self.pool1(x) x = self.dropout1(x) x = self.flatten(x) x =", "padding='same', activation='relu') self.conv2 = Conv2D(64, 3, padding='same', activation='relu') self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same')", "= self.pool1(x) x = self.dropout1(x) x = self.flatten(x) x = self.d1(x) x =", "= self.flatten(x) x = self.d1(x) x = self.dropout2(x) x = self.d2(x) return x", "self.conv1(x) x = self.conv2(x) x = self.pool1(x) x = self.dropout1(x) x = self.flatten(x)", "2), padding='same') self.dropout1 = Dropout(0.25) self.flatten = Flatten() self.d1 = Dense(128, activation='relu') self.dropout2", "= self.dropout1(x) x = self.flatten(x) x = self.d1(x) x = self.dropout2(x) x =", "x = self.dropout2(x) x = self.d2(x) return x def model(self): x = keras.Input(shape=(15,", "import tensorflow as tf from tensorflow import keras from tensorflow.keras.layers import Dense, Conv2D,", "from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras import Model class", "import Model class CNN(Model): def __init__(self): super(CNN, self).__init__() self.conv1 = Conv2D(32, 3, padding='same',", "= Conv2D(64, 3, padding='same', activation='relu') self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same') self.dropout1 = Dropout(0.25)", "self.conv2(x) x = self.pool1(x) x = self.dropout1(x) x = self.flatten(x) x = self.d1(x)", "self.dropout1(x) x = self.flatten(x) x = self.d1(x) x = self.dropout2(x) x 
= self.d2(x)", "x = self.flatten(x) x = self.d1(x) x = self.dropout2(x) x = self.d2(x) return", "keras from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D from tensorflow.keras import Model" ]
[ "## (option) test_data generation para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150) test_data =", "Show fitting results result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150) result_data = result_model.ode_sol()", "0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for key, value in A2} print(para) \"\"\" #", "= md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5, tend=150) model_data = para_model.ode_sol() mse = model_data[\"solution\"][:, 1]", "from lmfit import Parameters, minimize, report_fit import pandas as pd # Method One:", "False, None, None, None, None), ('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for key,", "key, value in A2} print(para) \"\"\" # define error function def error(para): para_model", "A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for key, value in A2} print(para) \"\"\" # define error", "test_data generation para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150) test_data = para_test_model.ode_sol() plt.figure(1)", "data-data needed return mse out = minimize(error, para_estimated) report_fit(out.params) print(error(out.params)) # Show fitting", "md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150) result_data = result_model.ode_sol() plt.figure(2) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1],", "('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for key, value in A2} print(para) \"\"\"", "from lmfit import Parameters, minimize, report_fit para_test=Parameters() para_test.add_many(('amp', 10, True, None, None, None,", "#https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0, max=1) para_estimated.add('gamma', value=0.02, 
min=0, max=1) \"\"\" from lmfit import", "by using lmfit para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0, max=1) para_estimated.add('gamma', value=0.02,", "needed return mse out = minimize(error, para_estimated) report_fit(out.params) print(error(out.params)) # Show fitting results", "1, False, None, None, None, None), ('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for", "import matplotlib.pyplot as plt import seaborn as sns from lmfit import Parameters, minimize,", "Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0, max=1) para_estimated.add('gamma', value=0.02, min=0, max=1) \"\"\" from lmfit", "= Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0, max=1) para_estimated.add('gamma', value=0.02, min=0, max=1) \"\"\" from", "tend=150) model_data = para_model.ode_sol() mse = model_data[\"solution\"][:, 1] - test_data[\"solution\"][:, 1] # only", "import division from __future__ import print_function from wbepi import basic_models as md import", "as pd # Method One: Nonlinear Least Square Method ## (option) test_data generation", "from __future__ import absolute_import from __future__ import division from __future__ import print_function from", "matplotlib.pyplot as plt import seaborn as sns from lmfit import Parameters, minimize, report_fit", "<reponame>Song921012/2021Waste_Water_Project from __future__ import absolute_import from __future__ import division from __future__ import print_function", "print(para) \"\"\" # define error function def error(para): para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0,", "tend=150) result_data = result_model.ode_sol() plt.figure(2) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1], \"o\") 
plt.plot(result_data[\"tspan\"], result_data[\"solution\"][:, 1])", "dt=5, tend=150) model_data = para_model.ode_sol() mse = model_data[\"solution\"][:, 1] - test_data[\"solution\"][:, 1] #", "result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150) result_data = result_model.ode_sol() plt.figure(2) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"],", "10, True, None, None, None, None), ('cen', 4, True, 0.0, None, None, None),", "para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5, tend=150) model_data = para_model.ode_sol() mse = model_data[\"solution\"][:,", "para_estimated.add('beta', value=0.01, min=0, max=1) para_estimated.add('gamma', value=0.02, min=0, max=1) \"\"\" from lmfit import Parameters,", "lmfit para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0, max=1) para_estimated.add('gamma', value=0.02, min=0, max=1)", "return mse out = minimize(error, para_estimated) report_fit(out.params) print(error(out.params)) # Show fitting results result_model", "import Parameters, minimize, report_fit import pandas as pd # Method One: Nonlinear Least", "('wid', 1, False, None, None, None, None), ('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value", "as md import numpy as np import matplotlib.pyplot as plt import seaborn as", "minimize, report_fit import pandas as pd # Method One: Nonlinear Least Square Method", "using lmfit para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0, max=1) para_estimated.add('gamma', value=0.02, min=0,", "= model_data[\"solution\"][:, 1] - test_data[\"solution\"][:, 1] # only data-data needed return mse out", "None), ('cen', 4, True, 0.0, None, None, None), ('wid', 1, False, None, None,", "None, None, None, None), ('cen', 4, True, 0.0, None, None, None), 
('wid', 1,", "0.0, None, None, None), ('wid', 1, False, None, None, None, None), ('frac', 0.5))", "para={key:value for key, value in A2} print(para) \"\"\" # define error function def", "def error(para): para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5, tend=150) model_data = para_model.ode_sol() mse", "value in A2} print(para) \"\"\" # define error function def error(para): para_model =", "('cen', 4, True, 0.0, None, None, None), ('wid', 1, False, None, None, None,", "import print_function from wbepi import basic_models as md import numpy as np import", "md import numpy as np import matplotlib.pyplot as plt import seaborn as sns", "report_fit import pandas as pd # Method One: Nonlinear Least Square Method ##", "## parameter estimation by using lmfit para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0,", "__future__ import print_function from wbepi import basic_models as md import numpy as np", "None, None, None, None), ('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for key, value", "pd # Method One: Nonlinear Least Square Method ## (option) test_data generation para_test_model", "Parameters, minimize, report_fit para_test=Parameters() para_test.add_many(('amp', 10, True, None, None, None, None), ('cen', 4,", "para_test=Parameters() para_test.add_many(('amp', 10, True, None, None, None, None), ('cen', 4, True, 0.0, None,", "sns from lmfit import Parameters, minimize, report_fit import pandas as pd # Method", "md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150) test_data = para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1])", "\"\"\" # define error function def error(para): para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5,", "mse out = minimize(error, para_estimated) report_fit(out.params) 
print(error(out.params)) # Show fitting results result_model =", "None), ('wid', 1, False, None, None, None, None), ('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1))", "as sns from lmfit import Parameters, minimize, report_fit import pandas as pd #", "print_function from wbepi import basic_models as md import numpy as np import matplotlib.pyplot", "tend=150) test_data = para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show() ## parameter estimation", "lmfit import Parameters, minimize, report_fit para_test=Parameters() para_test.add_many(('amp', 10, True, None, None, None, None),", "= md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150) result_data = result_model.ode_sol() plt.figure(2) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:,", "para_estimated.add('gamma', value=0.02, min=0, max=1) \"\"\" from lmfit import Parameters, minimize, report_fit para_test=Parameters() para_test.add_many(('amp',", "True, 0.0, None, None, None), ('wid', 1, False, None, None, None, None), ('frac',", "np import matplotlib.pyplot as plt import seaborn as sns from lmfit import Parameters,", "4, True, 0.0, None, None, None), ('wid', 1, False, None, None, None, None),", "1] - test_data[\"solution\"][:, 1] # only data-data needed return mse out = minimize(error,", "# Method One: Nonlinear Least Square Method ## (option) test_data generation para_test_model =", "max=1) \"\"\" from lmfit import Parameters, minimize, report_fit para_test=Parameters() para_test.add_many(('amp', 10, True, None,", "parameter estimation by using lmfit para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0, max=1)", "import seaborn as sns from lmfit import Parameters, minimize, report_fit import pandas as", "One: Nonlinear Least Square 
Method ## (option) test_data generation para_test_model = md.SIR(beta=0.2, gamma=0.1,", "para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150) test_data = para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"],", "(option) test_data generation para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150) test_data = para_test_model.ode_sol()", "generation para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150) test_data = para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\")", "Square Method ## (option) test_data generation para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150)", "import basic_models as md import numpy as np import matplotlib.pyplot as plt import", "wbepi import basic_models as md import numpy as np import matplotlib.pyplot as plt", "t0=0, dt=1, tend=150) result_data = result_model.ode_sol() plt.figure(2) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1], \"o\") plt.plot(result_data[\"tspan\"],", "Method ## (option) test_data generation para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150) test_data", "\"\"\" from lmfit import Parameters, minimize, report_fit para_test=Parameters() para_test.add_many(('amp', 10, True, None, None,", "report_fit para_test=Parameters() para_test.add_many(('amp', 10, True, None, None, None, None), ('cen', 4, True, 0.0,", "para_estimated) report_fit(out.params) print(error(out.params)) # Show fitting results result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1,", "para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show() ## parameter estimation by using lmfit", "min=0, max=1) para_estimated.add('gamma', value=0.02, min=0, max=1) \"\"\" from lmfit import Parameters, minimize, report_fit", "para_model.ode_sol() mse = 
model_data[\"solution\"][:, 1] - test_data[\"solution\"][:, 1] # only data-data needed return", "fitting results result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150) result_data = result_model.ode_sol() plt.figure(2)", "test_data = para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show() ## parameter estimation by", "define error function def error(para): para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5, tend=150) model_data", "estimation by using lmfit para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0, max=1) para_estimated.add('gamma',", "gamma=para[\"gamma\"], t0=0, dt=5, tend=150) model_data = para_model.ode_sol() mse = model_data[\"solution\"][:, 1] - test_data[\"solution\"][:,", "result_data = result_model.ode_sol() plt.figure(2) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1], \"o\") plt.plot(result_data[\"tspan\"], result_data[\"solution\"][:, 1]) plt.show()", "para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01, min=0, max=1) para_estimated.add('gamma', value=0.02, min=0, max=1) \"\"\"", "plt import seaborn as sns from lmfit import Parameters, minimize, report_fit import pandas", "Nonlinear Least Square Method ## (option) test_data generation para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0,", "error(para): para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5, tend=150) model_data = para_model.ode_sol() mse =", "gamma=0.1, t0=0, dt=5, tend=150) test_data = para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show()", "md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5, tend=150) model_data = 
para_model.ode_sol() mse = model_data[\"solution\"][:, 1] -", "report_fit(out.params) print(error(out.params)) # Show fitting results result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150)", "print(type(A2)==type(A1)) para={key:value for key, value in A2} print(para) \"\"\" # define error function", "value=0.02, min=0, max=1) \"\"\" from lmfit import Parameters, minimize, report_fit para_test=Parameters() para_test.add_many(('amp', 10,", "t0=0, dt=5, tend=150) model_data = para_model.ode_sol() mse = model_data[\"solution\"][:, 1] - test_data[\"solution\"][:, 1]", "model_data[\"solution\"][:, 1] - test_data[\"solution\"][:, 1] # only data-data needed return mse out =", "plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show() ## parameter estimation by using lmfit para_estimated = Parameters()", "basic_models as md import numpy as np import matplotlib.pyplot as plt import seaborn", "dt=5, tend=150) test_data = para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show() ## parameter", "sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show() ## parameter estimation by using lmfit para_estimated =", "None, None, None), ('cen', 4, True, 0.0, None, None, None), ('wid', 1, False,", "# only data-data needed return mse out = minimize(error, para_estimated) report_fit(out.params) print(error(out.params)) #", "= para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show() ## parameter estimation by using", "Method One: Nonlinear Least Square Method ## (option) test_data generation para_test_model = md.SIR(beta=0.2,", "Least Square Method ## (option) test_data generation para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5,", "None, None), ('cen', 4, True, 0.0, None, None, None), ('wid', 1, 
False, None,", "for key, value in A2} print(para) \"\"\" # define error function def error(para):", "1]) plt.show() ## parameter estimation by using lmfit para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta',", "test_data[\"solution\"][:, 1]) plt.show() ## parameter estimation by using lmfit para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html", "import pandas as pd # Method One: Nonlinear Least Square Method ## (option)", "# Show fitting results result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150) result_data =", "None, None), ('wid', 1, False, None, None, None, None), ('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items()", "1] # only data-data needed return mse out = minimize(error, para_estimated) report_fit(out.params) print(error(out.params))", "function def error(para): para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5, tend=150) model_data = para_model.ode_sol()", "out = minimize(error, para_estimated) report_fit(out.params) print(error(out.params)) # Show fitting results result_model = md.SIR(beta=out.params[\"beta\"],", "mse = model_data[\"solution\"][:, 1] - test_data[\"solution\"][:, 1] # only data-data needed return mse", "= para_model.ode_sol() mse = model_data[\"solution\"][:, 1] - test_data[\"solution\"][:, 1] # only data-data needed", "from __future__ import division from __future__ import print_function from wbepi import basic_models as", "None, None), ('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for key, value in A2}", "A2} print(para) \"\"\" # define error function def error(para): para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"],", "gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150) result_data = result_model.ode_sol() plt.figure(2) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], 
test_data[\"solution\"][:, 1], \"o\")", "minimize, report_fit para_test=Parameters() para_test.add_many(('amp', 10, True, None, None, None, None), ('cen', 4, True,", "max=1) para_estimated.add('gamma', value=0.02, min=0, max=1) \"\"\" from lmfit import Parameters, minimize, report_fit para_test=Parameters()", "plt.show() ## parameter estimation by using lmfit para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html para_estimated.add('beta', value=0.01,", "None), ('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for key, value in A2} print(para)", "model_data = para_model.ode_sol() mse = model_data[\"solution\"][:, 1] - test_data[\"solution\"][:, 1] # only data-data", "from __future__ import print_function from wbepi import basic_models as md import numpy as", "__future__ import division from __future__ import print_function from wbepi import basic_models as md", "numpy as np import matplotlib.pyplot as plt import seaborn as sns from lmfit", "value=0.01, min=0, max=1) para_estimated.add('gamma', value=0.02, min=0, max=1) \"\"\" from lmfit import Parameters, minimize,", "test_data[\"solution\"][:, 1] # only data-data needed return mse out = minimize(error, para_estimated) report_fit(out.params)", "pandas as pd # Method One: Nonlinear Least Square Method ## (option) test_data", "dt=1, tend=150) result_data = result_model.ode_sol() plt.figure(2) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1], \"o\") plt.plot(result_data[\"tspan\"], result_data[\"solution\"][:,", "= md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150) test_data = para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:,", "as np import matplotlib.pyplot as plt import seaborn as sns from lmfit import", "__future__ import absolute_import from __future__ import division from __future__ import 
print_function from wbepi", "absolute_import from __future__ import division from __future__ import print_function from wbepi import basic_models", "plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show() ## parameter estimation by using lmfit para_estimated", "from wbepi import basic_models as md import numpy as np import matplotlib.pyplot as", "error function def error(para): para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5, tend=150) model_data =", "import absolute_import from __future__ import division from __future__ import print_function from wbepi import", "True, None, None, None, None), ('cen', 4, True, 0.0, None, None, None), ('wid',", "- test_data[\"solution\"][:, 1] # only data-data needed return mse out = minimize(error, para_estimated)", "t0=0, dt=5, tend=150) test_data = para_test_model.ode_sol() plt.figure(1) sns.set_theme(style=\"darkgrid\") plt.plot(test_data[\"tspan\"], test_data[\"solution\"][:, 1]) plt.show() ##", "minimize(error, para_estimated) report_fit(out.params) print(error(out.params)) # Show fitting results result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0,", "as plt import seaborn as sns from lmfit import Parameters, minimize, report_fit import", "print(error(out.params)) # Show fitting results result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150) result_data", "import Parameters, minimize, report_fit para_test=Parameters() para_test.add_many(('amp', 10, True, None, None, None, None), ('cen',", "Parameters, minimize, report_fit import pandas as pd # Method One: Nonlinear Least Square", "para_test.add_many(('amp', 10, True, None, None, None, None), ('cen', 4, True, 0.0, None, None,", "only data-data needed return mse out = minimize(error, para_estimated) report_fit(out.params) print(error(out.params)) # Show", "min=0, max=1) \"\"\" from lmfit import Parameters, 
minimize, report_fit para_test=Parameters() para_test.add_many(('amp', 10, True,", "in A2} print(para) \"\"\" # define error function def error(para): para_model = md.SIR(beta=para[\"beta\"],", "division from __future__ import print_function from wbepi import basic_models as md import numpy", "lmfit import Parameters, minimize, report_fit import pandas as pd # Method One: Nonlinear", "A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for key, value in A2} print(para) \"\"\" # define", "None, None, None), ('wid', 1, False, None, None, None, None), ('frac', 0.5)) A1={\"te\":1}.items()", "= minimize(error, para_estimated) report_fit(out.params) print(error(out.params)) # Show fitting results result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"],", "import numpy as np import matplotlib.pyplot as plt import seaborn as sns from", "results result_model = md.SIR(beta=out.params[\"beta\"], gamma=out.params[\"gamma\"], t0=0, dt=1, tend=150) result_data = result_model.ode_sol() plt.figure(2) sns.set_theme(style=\"darkgrid\")", "# define error function def error(para): para_model = md.SIR(beta=para[\"beta\"], gamma=para[\"gamma\"], t0=0, dt=5, tend=150)", "seaborn as sns from lmfit import Parameters, minimize, report_fit import pandas as pd", "None, None, None), ('frac', 0.5)) A1={\"te\":1}.items() A2=para_test.valuesdict().items() print(type(A2)==type(A1)) para={key:value for key, value in" ]
[ "weren't able to make a writeable one in the main tree -- use", "print(\"get_ipython not found\") return None get_ipython().magic(\"matplotlib inline\") settings.display.reset = _display_reset, settings_manager.DocString(\"call this to", "make a cache subdir, if so required if subdir: cacheurl += \"/\" +", "a .radiopadre dir within, and make a symlink to it in the shadow", "are initialized, does various other init\"\"\" global _init_js_side_done if _init_js_side_done: print(\"init_js_side already done\")", "to view non-writeable directory, but access to the shadow tree is not set", "import layouts html += layouts.init_html from radiopadre_kernel import js9 if not js9.JS9_ERROR: html", "\\ cell_width, window_width, window_height # def protect(author=None): # \"\"\"Makes current notebook protected with", "now unprotected. # All users can treat it as read-write.\"\"\"))) # def copy_current_notebook(oldpath,", "select \"Cell|Run all\" from the menu to render this notebook. \"\"\" % (newpath,", "__init: from radiopadre_kernel import casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre JS side\") # print(\"initializing radiopadre\") _init_js_side()", "\"/.radiopadre\" if not os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir, os.W_OK): if not os.path.exists(shadowdir): os.system(\"mkdir -p", "# All users can treat it as read-write.\"\"\"))) # def copy_current_notebook(oldpath, newpath, cell=0,", "if not __init: from radiopadre_kernel import casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre JS side\") # print(\"initializing", "stuff is setup by the kernel, pull from it from radiopadre_kernel import SESSION_ID,", "a writeable one in the main tree -- use shadow tree # if", "\"\" reldir = basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying", "radiopadre_kernel import casacore_tables 
radiopadre_kernel.log.info(\"initializing radiopadre JS side\") # print(\"initializing radiopadre\") _init_js_side() __init =", "from init errors = radiopadre_kernel.log_handler.get_records('WARNING') if errors: html += render_table(errors, [\"\", \"\"], numbering=False)", "read-write.\"\"\"))) # def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'): # read notebook data data", "lsR, lst, lsrt from .filelist import FileList from .fitsfile import FITSFile from .imagefile", "\"/.radiopadre\" if not os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir)) if not os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache", "\"\"], numbering=False))) show_status = display_status show_log = display_log def get_cache_dir(path, subdir=None): \"\"\" Creates", "done\") return _init_js_side_done = True try: get_ipython except: print(\"get_ipython not found\") return None", "from .datadir import DataDir, ls, lsR, lst, lsrt from .filelist import FileList from", "indicated one del cells[:cell + 1] # scrub cell output for c in", "getattr(nbformat, 'v' + str(current_version)) # accommodate worksheets, if available if hasattr(nbdata, 'worksheets'): raise", "we're stuck, so may as well bomb out if cachedir is None: if", "modules if not NBCONVERT: from . 
import fitsfile html += fitsfile.add_general_buttons() # get", "in the main tree -- use shadow tree # if this fails, we're", "not __init: from radiopadre_kernel import casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre JS side\") # print(\"initializing radiopadre\")", "__version__ = pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__ = \"development\" ## various notebook-related init try:", "parent): return subdir == parent or subdir.startswith(parent+\"/\") from radiopadre_kernel import _make_symlink def display_status():", "to treat this notebook as read-only.\"\"\" % author))) # # # def unprotect():", "cachedir = basedir + \"/.radiopadre\" if not os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir, os.W_OK): if", "0 if 'signature' in metadata: metadata['signature'] = \"\" # save nbformat.write(nbdata, open(newpath, 'w'),", "return newpath __init = False # print(\"importing radiopadre\") if not __init: from radiopadre_kernel", "current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook was automatically generated from ``%s`` using the 'copy notebook'", "None, we weren't able to make a writeable one in the main tree", "display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width, window_width, window_height): if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width,", "from .file import autodetect_file_type from .datadir import DataDir, ls, lsR, lst, lsrt from", "author))) # # # def unprotect(): # \"\"\"Makes current notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\"))", "= open(oldpath).read() version = json.loads(data)['nbformat'] nbdata = nbformat.reads(data, version) nbdata.keys() # convert to", "shadowdir += \"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir = shadowdir else: cachedir = 
None #", "layouts html += layouts.init_html from radiopadre_kernel import js9 if not js9.JS9_ERROR: html +=", "None get_ipython().magic(\"matplotlib inline\") settings.display.reset = _display_reset, settings_manager.DocString(\"call this to reset sizes explicitly\") html", "notebook data data = open(oldpath).read() version = json.loads(data)['nbformat'] nbdata = nbformat.reads(data, version) nbdata.keys()", "nbformat import os import pkg_resources import radiopadre_kernel from IPython.display import display, HTML, Javascript", "name. Protected notebooks won't be saved # unless the user matches the author.\"\"\"", "cacheurl = CACHE_URL_ROOT + reldir + \"/.radiopadre\" shadowdir = SHADOW_HOME + basedir cachedir", "RuntimeError(\"Cache directory {} not user-writeable. Try removing it?\".format(cachedir)) # make a cache subdir,", "but reloading is useful for debugging styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html += f\"\"\"<style>", "import FileList from .fitsfile import FITSFile from .imagefile import ImageFile from .casatable import", "directory for caching radiopadre stuff associated with the given file. 
Returns tuple of", "window_width, window_height # def protect(author=None): # \"\"\"Makes current notebook protected with the given", "All other users will have to treat this notebook as read-only.\"\"\" % author)))", "SESSION_DIR, SESSION_URL, NBCONVERT # init settings settings = settings_manager.RadiopadreSettingsManager() try: __version__ = pkg_resources.require(\"radiopadre\")[0].version", "read-only.\"\"\" % author))) # # # def unprotect(): # \"\"\"Makes current notebook unprotected.\"\"\"", "numbering=False))) show_status = display_status show_log = display_log def get_cache_dir(path, subdir=None): \"\"\" Creates directory", "import CasaTable from .htmlfile import HTMLFile, URL from .table import tabulate from .render", "radiopadre_kernel from IPython.display import display, HTML, Javascript from radiopadre_utils.notebook_utils import scrub_cell from radiopadre", "+ str(current_version)) # accommodate worksheets, if available if hasattr(nbdata, 'worksheets'): raise (RuntimeError, \"copy_current_notebook:", "import autodetect_file_type from .datadir import DataDir, ls, lsR, lst, lsrt from .filelist import", "SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data += [(\"\", \"startup", "'copy notebook' feature. Please select \"Cell|Run all\" from the menu to render this", "make a .radiopadre dir within, and make a symlink to it in the", "to it in the shadow tree. 
if os.access(basedir, os.W_OK): cachedir = basedir +", "the author.\"\"\" # author = author or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" % author)) #", "NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL, NBCONVERT # init settings settings = settings_manager.RadiopadreSettingsManager()", "out all cells up to and including indicated one del cells[:cell + 1]", "= radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else \"INFO\") display(HTML(render_table(data, [\"\", \"\"], numbering=False))) show_status = display_status", "None def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks that Javascript components of radiopadre are", "print(\"importing radiopadre\") if not __init: from radiopadre_kernel import casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre JS side\")", "# get list of warnings and errors from init errors = radiopadre_kernel.log_handler.get_records('WARNING') if", "HTMLFile, URL from .table import tabulate from .render import render_table, render_preamble, render_refresh_button, render_status_message,", "if copy_root: code += \"\\n%s = %s[0]\" % (copy_root, copy_dirs) code += \"\\n%s.show()\"", "the kernel, pull from it from radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME, \\ LOGFILE,", "tree. if os.access(basedir, os.W_OK): cachedir = basedir + \"/.radiopadre\" if not os.path.exists(cachedir): os.mkdir(cachedir)", "other init\"\"\" global _init_js_side_done if _init_js_side_done: print(\"init_js_side already done\") return _init_js_side_done = True", "from radiopadre import layouts html += layouts.init_html from radiopadre_kernel import js9 if not", "write to the basedir, make a .radiopadre dir within, and make a symlink", "off the root, this becomes the relative path to it, else \"\" reldir", "TRALING SLASH!!! 
def _strip_slash(path): return path if path == \"/\" or path is", "boilerplate code code = \"import radiopadre\\n\" + \\ \"%s = radiopadre.DirList('.')\" % copy_dirs", "= \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) # reload styles -- loaded from", "from .casatable import CasaTable from .htmlfile import HTMLFile, URL from .table import tabulate", "basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying to access {},", "matches the author.\"\"\" # author = author or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" % author))", "from radiopadre_kernel import casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre JS side\") # print(\"initializing radiopadre\") _init_js_side() __init", "os.access(basedir, os.W_OK): cachedir = basedir + \"/.radiopadre\" if not os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir,", "CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data += [(\"\", \"startup log follows:\")] data", "basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying to access {}, which is outside the {} hierarchy\".format(basedir,", "nbdata['cells'] # strip out all cells up to and including indicated one del", "or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" % author)) # display(HTML(render_status_message(\"\"\"This notebook is now protected, author", "os.getcwd()) ] for varname in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR", "out if cachedir is None: if not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to view non-writeable", "SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, 
NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL, NBCONVERT #", "_init_js_side_done if _init_js_side_done: print(\"init_js_side already done\") return _init_js_side_done = True try: get_ipython except:", "None # if cachedir remains None, we weren't able to make a writeable", "code += \"\\n%s.show()\" % copy_dirs # insert output output = current_format.new_output(\"display_data\", data={ \"text/html\":", ")) # cleanup metadata metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub'] = 0 if 'signature' in", "data += radiopadre_kernel.log_handler.get_records() from IPython.display import HTML display(HTML(render_table(data, [\"\", \"\"], numbering=False))) def display_log(debug=False):", "format current_version = nbformat.current_nbformat nbdata = nbformat.convert(nbdata, current_version) current_format = getattr(nbformat, 'v' +", "= nbformat.reads(data, version) nbdata.keys() # convert to current format current_version = nbformat.current_nbformat nbdata", "that Javascript components of radiopadre are initialized, does various other init\"\"\" global _init_js_side_done", "nbdata = nbformat.convert(nbdata, current_version) current_format = getattr(nbformat, 'v' + str(current_version)) # accommodate worksheets,", "author is \"%s\". 
# All other users will have to treat this notebook", "# if cachedir remains None, we weren't able to make a writeable one", "parent or subdir.startswith(parent+\"/\") from radiopadre_kernel import _make_symlink def display_status(): # setup status data", "numbering=False) display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width, window_width, window_height): if settings.display.auto_reset: settings.display.cell_width,", "if path == \"/\" or path is None else path.rstrip(\"/\") def _is_subdir(subdir, parent):", "cachedir = shadowdir else: cachedir = None # if cachedir remains None, we", "stuff from .file import autodetect_file_type from .datadir import DataDir, ls, lsR, lst, lsrt", "display_status(): # setup status data = [ (\"cwd\", os.getcwd()) ] for varname in", "the directory. The latter is the URL to this directory. \"\"\" if ABSROOTDIR", "+= \"/\" + subdir cachedir += \"/\" + subdir if not os.path.exists(cachedir): os.mkdir(cachedir)", "window_height): if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height = \\ cell_width, window_width, window_height # def", "errors: html += render_table(errors, [\"\", \"\"], numbering=False) display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def", "not NBCONVERT: from . 
import fitsfile html += fitsfile.add_general_buttons() # get list of", "_is_subdir(subdir, parent): return subdir == parent or subdir.startswith(parent+\"/\") from radiopadre_kernel import _make_symlink def", "\"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import layouts html += layouts.init_html from radiopadre_kernel import js9", "SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR,", "The former is the (shadow) filesystem location of the directory. The latter is", "author = author or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" % author)) # display(HTML(render_status_message(\"\"\"This notebook is", "users will have to treat this notebook as read-only.\"\"\" % author))) # #", "window_width, window_height): if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height = \\ cell_width, window_width, window_height #", "cachedir = None # if we can write to the basedir, make a", "_is_subdir(basedir, ABSROOTDIR): # if in a subdirectory off the root, this becomes the", "str(current_version)) # accommodate worksheets, if available if hasattr(nbdata, 'worksheets'): raise (RuntimeError, \"copy_current_notebook: not", "window_height # def protect(author=None): # \"\"\"Makes current notebook protected with the given author", "metadata = nbdata['metadata'] cells = nbdata['cells'] # strip out all cells up to", "SESSION_ID, VERBOSE, HOSTNAME, \\ LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR,", "path.rstrip(\"/\") def _is_subdir(subdir, parent): return subdir == parent or subdir.startswith(parent+\"/\") from radiopadre_kernel import", "red'>Please select Cell|Run all from the menu to render this notebook.</b>\"] }) cells.insert(0,", "radiopadre JS side\") # 
print(\"initializing radiopadre\") _init_js_side() __init = True # import stuff", "subdirectory off the root, this becomes the relative path to it, else \"\"", "def _init_js_side(): \"\"\"Checks that Javascript components of radiopadre are initialized, does various other", "be saved # unless the user matches the author.\"\"\" # author = author", "= nbdata['metadata'] cells = nbdata['cells'] # strip out all cells up to and", "is useful for debugging styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html += f\"\"\"<style> {open(styles_file).read()} </style>\"\"\"", "def _strip_slash(path): return path if path == \"/\" or path is None else", "except ImportError: radiopadre_kernel.log.warning(\"Failed to import astropy\") # NONE OF THE DIR NAMES ABOVE", "__init = False # print(\"importing radiopadre\") if not __init: from radiopadre_kernel import casacore_tables", "CACHE_URL_ROOT + reldir + \"/.radiopadre\" shadowdir = SHADOW_HOME + basedir cachedir = None", "version) return newpath __init = False # print(\"importing radiopadre\") if not __init: from", "def _is_subdir(subdir, parent): return subdir == parent or subdir.startswith(parent+\"/\") from radiopadre_kernel import _make_symlink", "if cachedir is None: if not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to view non-writeable directory,", "= 0 metadata['radiopadre_notebook_scrub'] = 0 if 'signature' in metadata: metadata['signature'] = \"\" #", "as read-write.\"\"\"))) # def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'): # read notebook data", "from radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME, \\ LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR,", "hierarchy\".format(basedir, ABSROOTDIR)) cacheurl = CACHE_URL_ROOT + reldir + \"/.radiopadre\" shadowdir = SHADOW_HOME +", "js9.JS9_INIT_HTML_DYNAMIC # get buttons from various modules if not NBCONVERT: from . 
import", "# def unprotect(): # \"\"\"Makes current notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This notebook", "not os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir, os.W_OK): if not os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir", "= \"development\" ## various notebook-related init try: import astropy astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed", "up. This is a bug.\") cachedir = shadowdir + \"/.radiopadre\" if not os.path.exists(cachedir):", "+ subdir if not os.path.exists(cachedir): os.mkdir(cachedir) return cachedir, cacheurl _init_js_side_done = None def", "does various other init\"\"\" global _init_js_side_done if _init_js_side_done: print(\"init_js_side already done\") return _init_js_side_done", "saved # unless the user matches the author.\"\"\" # author = author or", "various other init\"\"\" global _init_js_side_done if _init_js_side_done: print(\"init_js_side already done\") return _init_js_side_done =", "notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This notebook is now unprotected. # All users", "errors from init errors = radiopadre_kernel.log_handler.get_records('WARNING') if errors: html += render_table(errors, [\"\", \"\"],", "# insert boilerplate code code = \"import radiopadre\\n\" + \\ \"%s = radiopadre.DirList('.')\"", "\\ SESSION_DIR, SESSION_URL, NBCONVERT # init settings settings = settings_manager.RadiopadreSettingsManager() try: __version__ =", "within, and make a symlink to it in the shadow tree. if os.access(basedir,", "notebook protected with the given author name. Protected notebooks won't be saved #", "if available if hasattr(nbdata, 'worksheets'): raise (RuntimeError, \"copy_current_notebook: not compatible with worksheets\") metadata", "HAVE A TRALING SLASH!!! 
def _strip_slash(path): return path if path == \"/\" or", "to it, else \"\" reldir = basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):]", "HTML display(HTML(render_table(data, [\"\", \"\"], numbering=False))) def display_log(debug=False): from IPython.display import HTML data =", "subdir=None): \"\"\" Creates directory for caching radiopadre stuff associated with the given file.", "SESSION_URL, NBCONVERT # init settings settings = settings_manager.RadiopadreSettingsManager() try: __version__ = pkg_resources.require(\"radiopadre\")[0].version except", "which is outside the {} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl = CACHE_URL_ROOT + reldir +", "except: print(\"get_ipython not found\") return None get_ipython().magic(\"matplotlib inline\") settings.display.reset = _display_reset, settings_manager.DocString(\"call this", "render_status_message, render_table # this stuff is setup by the kernel, pull from it", "symlink to it in the shadow tree. if os.access(basedir, os.W_OK): cachedir = basedir", "view non-writeable directory, but access to the shadow tree is not set up.", "-p {}\".format(cachedir)) if not os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache directory {} not user-writeable. Try", "== \"/\" or path is None else path.rstrip(\"/\") def _is_subdir(subdir, parent): return subdir", "cachedir remains None, we weren't able to make a writeable one in the", "# print(\"initializing radiopadre\") _init_js_side() __init = True # import stuff from .file import", "if not NBCONVERT: from . import fitsfile html += fitsfile.add_general_buttons() # get list", "raise RuntimeError(\"radiopadre.init() must be called first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR): #", "from . 
import fitsfile html += fitsfile.add_general_buttons() # get list of warnings and", "shadowdir + \"/.radiopadre\" if not os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir)) if not os.access(cachedir, os.W_OK):", "current_format = getattr(nbformat, 'v' + str(current_version)) # accommodate worksheets, if available if hasattr(nbdata,", "from radiopadre.render import render_error, show_exception, TransientMessage, render_status_message, render_table # this stuff is setup", "pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__ = \"development\" ## various notebook-related init try: import astropy", "display_log(debug=False): from IPython.display import HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else \"INFO\") display(HTML(render_table(data,", "else: raise RuntimeError(\"Trying to access {}, which is outside the {} hierarchy\".format(basedir, ABSROOTDIR))", "from radiopadre import settings_manager from radiopadre.render import render_error, show_exception, TransientMessage, render_status_message, render_table #", "= basedir + \"/.radiopadre\" if not os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir, os.W_OK): if not", "= basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying to access", "basedir + \"/.radiopadre\" if not os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir, os.W_OK): if not os.path.exists(shadowdir):", "radiopadre\") if not __init: from radiopadre_kernel import casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre JS side\") #", "pull from it from radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME, \\ LOGFILE, ABSROOTDIR, ROOTDIR,", "def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'): # read notebook data data = open(oldpath).read()", "Protected notebooks won't be 
saved # unless the user matches the author.\"\"\" #", "to make a writeable one in the main tree -- use shadow tree", "print(\"initializing radiopadre\") _init_js_side() __init = True # import stuff from .file import autodetect_file_type", "radiopadre.render import render_error, show_exception, TransientMessage, render_status_message, render_table # this stuff is setup by", "users can treat it as read-write.\"\"\"))) # def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'):", "radiopadre import layouts html += layouts.init_html from radiopadre_kernel import js9 if not js9.JS9_ERROR:", "json.loads(data)['nbformat'] nbdata = nbformat.reads(data, version) nbdata.keys() # convert to current format current_version =", "os.W_OK): raise RuntimeError(\"Cache directory {} not user-writeable. Try removing it?\".format(cachedir)) # make a", "settings.display.window_height = \\ cell_width, window_width, window_height # def protect(author=None): # \"\"\"Makes current notebook", "html = \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) # reload styles -- loaded", "and including indicated one del cells[:cell + 1] # scrub cell output for", "% (newpath, oldpath), )) # cleanup metadata metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub'] = 0", "\\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL, NBCONVERT", "the 'copy notebook' feature. Please select \"Cell|Run all\" from the menu to render", "the shadow tree is not set up. 
This is a bug.\") cachedir =", "components of radiopadre are initialized, does various other init\"\"\" global _init_js_side_done if _init_js_side_done:", "DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\", "+= layouts.init_html from radiopadre_kernel import js9 if not js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC #", "directory. \"\"\" if ABSROOTDIR is None: raise RuntimeError(\"radiopadre.init() must be called first\") basedir", "outside the {} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl = CACHE_URL_ROOT + reldir + \"/.radiopadre\" shadowdir", "access {}, which is outside the {} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl = CACHE_URL_ROOT +", "non-writeable directory, but access to the shadow tree is not set up. This", "os.system(\"mkdir -p {}\".format(cachedir)) if not os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache directory {} not user-writeable.", "if subdir: cacheurl += \"/\" + subdir cachedir += \"/\" + subdir if", "shadow tree # if this fails, we're stuck, so may as well bomb", "notebooks won't be saved # unless the user matches the author.\"\"\" # author", "if errors: html += render_table(errors, [\"\", \"\"], numbering=False) display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\"))", "notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code, outputs=[output])) # insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook", "Please select \"Cell|Run all\" from the menu to render this notebook. 
\"\"\" %", "settings_manager.DocString(\"call this to reset sizes explicitly\") html = \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script>", "HOSTNAME, \\ LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\", "styles -- loaded from radiopadre-kernel.js already, but reloading is useful for debugging styles_file", "# # def unprotect(): # \"\"\"Makes current notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This", "notebook-related init try: import astropy astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed to import astropy\") #", "(real_path, url_path). The former is the (shadow) filesystem location of the directory. The", "ABSROOTDIR): # if in a subdirectory off the root, this becomes the relative", "in cells: scrub_cell(c) # insert boilerplate code code = \"import radiopadre\\n\" + \\", "FileList from .fitsfile import FITSFile from .imagefile import ImageFile from .casatable import CasaTable", "html += f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import layouts", "initialized, does various other init\"\"\" global _init_js_side_done if _init_js_side_done: print(\"init_js_side already done\") return", "``%s`` using the 'copy notebook' feature. Please select \"Cell|Run all\" from the menu", "+ \"/.radiopadre\" if not os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir, os.W_OK): if not os.path.exists(shadowdir): os.system(\"mkdir", "directory {} not user-writeable. 
Try removing it?\".format(cachedir)) # make a cache subdir, if", "+= \"\\n%s.show()\" % copy_dirs # insert output output = current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b", "is outside the {} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl = CACHE_URL_ROOT + reldir + \"/.radiopadre\"", "os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir)) if not os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache directory {} not", "[\"\", \"\"], numbering=False))) show_status = display_status show_log = display_log def get_cache_dir(path, subdir=None): \"\"\"", "+ subdir cachedir += \"/\" + subdir if not os.path.exists(cachedir): os.mkdir(cachedir) return cachedir,", "= author or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" % author)) # display(HTML(render_status_message(\"\"\"This notebook is now", "1] # scrub cell output for c in cells: scrub_cell(c) # insert boilerplate", "author name. Protected notebooks won't be saved # unless the user matches the", "style='color: red'>Please select Cell|Run all from the menu to render this notebook.</b>\"] })", "basedir, make a .radiopadre dir within, and make a symlink to it in", "hasattr(nbdata, 'worksheets'): raise (RuntimeError, \"copy_current_notebook: not compatible with worksheets\") metadata = nbdata['metadata'] cells", "= current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b style='color: red'>Please select Cell|Run all from the menu", "CasaTable from .htmlfile import HTMLFile, URL from .table import tabulate from .render import", "if not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to view non-writeable directory, but access to the", "copy_dirs if copy_root: code += \"\\n%s = %s[0]\" % (copy_root, copy_dirs) code +=", "accommodate worksheets, if available if hasattr(nbdata, 'worksheets'): raise (RuntimeError, \"copy_current_notebook: not compatible with", "render_table # this stuff is setup by the kernel, pull from it from", "\"\"\" 
if ABSROOTDIR is None: raise RuntimeError(\"radiopadre.init() must be called first\") basedir =", "if os.access(cachedir, os.W_OK): if not os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir += \"/.radiopadre\" _make_symlink(cachedir,", "shadow tree is not set up. This is a bug.\") cachedir = shadowdir", "current_format.new_code_cell(code, outputs=[output])) # insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook was automatically generated", ".casatable import CasaTable from .htmlfile import HTMLFile, URL from .table import tabulate from", "the menu to render this notebook. \"\"\" % (newpath, oldpath), )) # cleanup", "cache subdir, if so required if subdir: cacheurl += \"/\" + subdir cachedir", "subdir if not os.path.exists(cachedir): os.mkdir(cachedir) return cachedir, cacheurl _init_js_side_done = None def _display_reset():", "and make a symlink to it in the shadow tree. if os.access(basedir, os.W_OK):", "render this notebook. 
\"\"\" % (newpath, oldpath), )) # cleanup metadata metadata['radiopadre_notebook_protect'] =", "insert output output = current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b style='color: red'>Please select Cell|Run all", "cachedir += \"/\" + subdir if not os.path.exists(cachedir): os.mkdir(cachedir) return cachedir, cacheurl _init_js_side_done", "\"import radiopadre\\n\" + \\ \"%s = radiopadre.DirList('.')\" % copy_dirs if copy_root: code +=", "\"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir = shadowdir else: cachedir = None # if cachedir", "onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import layouts html += layouts.init_html from radiopadre_kernel import js9 if", "os.W_OK): if not os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir += \"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir", "insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook was automatically generated from ``%s`` using", "data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else \"INFO\") display(HTML(render_table(data, [\"\", \"\"], numbering=False))) show_status =", "DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname, globals()[varname]))", "already done\") return _init_js_side_done = True try: get_ipython except: print(\"get_ipython not found\") return", "\"startup log follows:\")] data += radiopadre_kernel.log_handler.get_records() from IPython.display import HTML display(HTML(render_table(data, [\"\", \"\"],", "data = open(oldpath).read() version = json.loads(data)['nbformat'] nbdata = nbformat.reads(data, version) nbdata.keys() # convert", "else \"\" reldir = basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] 
else: raise", "is a bug.\") cachedir = shadowdir + \"/.radiopadre\" if not os.path.exists(cachedir): os.system(\"mkdir -p", "log follows:\")] data += radiopadre_kernel.log_handler.get_records() from IPython.display import HTML display(HTML(render_table(data, [\"\", \"\"], numbering=False)))", "= \"import radiopadre\\n\" + \\ \"%s = radiopadre.DirList('.')\" % copy_dirs if copy_root: code", "IPython.display import HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else \"INFO\") display(HTML(render_table(data, [\"\", \"\"],", "render this notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code, outputs=[output])) # insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis", "path == \"/\" or path is None else path.rstrip(\"/\") def _is_subdir(subdir, parent): return", "# def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'): # read notebook data data =", "os.path.exists(cachedir): os.mkdir(cachedir) return cachedir, cacheurl _init_js_side_done = None def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side():", "'w'), version) return newpath __init = False # print(\"importing radiopadre\") if not __init:", "SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data +=", "user-writeable. Try removing it?\".format(cachedir)) # make a cache subdir, if so required if", "becomes the relative path to it, else \"\" reldir = basedir[len(ABSROOTDIR):] elif _is_subdir(basedir,", "Try removing it?\".format(cachedir)) # make a cache subdir, if so required if subdir:", ".fitsfile import FITSFile from .imagefile import ImageFile from .casatable import CasaTable from .htmlfile", ". 
import fitsfile html += fitsfile.add_general_buttons() # get list of warnings and errors", "import scrub_cell from radiopadre import settings_manager from radiopadre.render import render_error, show_exception, TransientMessage, render_status_message,", "notebook as read-only.\"\"\" % author))) # # # def unprotect(): # \"\"\"Makes current", "shadow tree. if os.access(basedir, os.W_OK): cachedir = basedir + \"/.radiopadre\" if not os.path.exists(cachedir):", "_strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR): # if in a subdirectory off the root, this", "using the 'copy notebook' feature. Please select \"Cell|Run all\" from the menu to", "astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed to import astropy\") # NONE OF THE DIR NAMES", "import _make_symlink def display_status(): # setup status data = [ (\"cwd\", os.getcwd()) ]", "cacheurl += \"/\" + subdir cachedir += \"/\" + subdir if not os.path.exists(cachedir):", "subdir == parent or subdir.startswith(parent+\"/\") from radiopadre_kernel import _make_symlink def display_status(): # setup", "if this fails, we're stuck, so may as well bomb out if cachedir", "inline\") settings.display.reset = _display_reset, settings_manager.DocString(\"call this to reset sizes explicitly\") html = \"\"\"<script", "import HTML display(HTML(render_table(data, [\"\", \"\"], numbering=False))) def display_log(debug=False): from IPython.display import HTML data", "\"\"], numbering=False))) def display_log(debug=False): from IPython.display import HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug", "SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL, NBCONVERT # init", "try: import astropy astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed to import astropy\") # NONE OF", "settings.display.reset = _display_reset, 
settings_manager.DocString(\"call this to reset sizes explicitly\") html = \"\"\"<script type='text/javascript'>", "hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width, window_width, window_height): if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height =", "not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to view non-writeable directory, but access to the shadow", "not js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC # get buttons from various modules if not", "not found\") return None get_ipython().magic(\"matplotlib inline\") settings.display.reset = _display_reset, settings_manager.DocString(\"call this to reset", "cells[:cell + 1] # scrub cell output for c in cells: scrub_cell(c) #", "the {} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl = CACHE_URL_ROOT + reldir + \"/.radiopadre\" shadowdir =", "== parent or subdir.startswith(parent+\"/\") from radiopadre_kernel import _make_symlink def display_status(): # setup status", "# display(HTML(render_status_message(\"\"\"This notebook is now unprotected. 
# All users can treat it as", "radiopadre_kernel import _make_symlink def display_status(): # setup status data = [ (\"cwd\", os.getcwd())", "nbformat.reads(data, version) nbdata.keys() # convert to current format current_version = nbformat.current_nbformat nbdata =", "useful for debugging styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html += f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html", "if in a subdirectory off the root, this becomes the relative path to", "current_version = nbformat.current_nbformat nbdata = nbformat.convert(nbdata, current_version) current_format = getattr(nbformat, 'v' + str(current_version))", "None: if not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to view non-writeable directory, but access to", "may as well bomb out if cachedir is None: if not SHADOW_URL_PREFIX: raise", "= basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying to access {}, which is outside the {}", "nbformat.current_nbformat nbdata = nbformat.convert(nbdata, current_version) current_format = getattr(nbformat, 'v' + str(current_version)) # accommodate", "raise RuntimeError(\"Trying to access {}, which is outside the {} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl", "= %s[0]\" % (copy_root, copy_dirs) code += \"\\n%s.show()\" % copy_dirs # insert output", "output output = current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b style='color: red'>Please select Cell|Run all from", "# author = author or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" % author)) # display(HTML(render_status_message(\"\"\"This notebook", "a symlink to it in the shadow tree. 
if os.access(basedir, os.W_OK): cachedir =", ".datadir import DataDir, ls, lsR, lst, lsrt from .filelist import FileList from .fitsfile", "casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre JS side\") # print(\"initializing radiopadre\") _init_js_side() __init = True #", "SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying to access {}, which is outside", "URL to this directory. \"\"\" if ABSROOTDIR is None: raise RuntimeError(\"radiopadre.init() must be", "SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to view non-writeable directory, but access to the shadow tree", "get buttons from various modules if not NBCONVERT: from . import fitsfile html", "init\"\"\" global _init_js_side_done if _init_js_side_done: print(\"init_js_side already done\") return _init_js_side_done = True try:", "True # import stuff from .file import autodetect_file_type from .datadir import DataDir, ls,", "radiopadre\") _init_js_side() __init = True # import stuff from .file import autodetect_file_type from", "json import nbformat import os import pkg_resources import radiopadre_kernel from IPython.display import display,", "IPython.display import display, HTML, Javascript from radiopadre_utils.notebook_utils import scrub_cell from radiopadre import settings_manager", "cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook was automatically generated from ``%s`` using the 'copy", "False # print(\"importing radiopadre\") if not __init: from radiopadre_kernel import casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre", "the basedir, make a .radiopadre dir within, and make a symlink to it", "a cache subdir, if so required if subdir: cacheurl += \"/\" + subdir", "# cleanup metadata metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub'] = 0 if 'signature' in metadata:", "pkg_resources import radiopadre_kernel from IPython.display import display, 
HTML, Javascript from radiopadre_utils.notebook_utils import scrub_cell", "import FITSFile from .imagefile import ImageFile from .casatable import CasaTable from .htmlfile import", "%s[0]\" % (copy_root, copy_dirs) code += \"\\n%s.show()\" % copy_dirs # insert output output", ".radiopadre dir within, and make a symlink to it in the shadow tree.", "else path.rstrip(\"/\") def _is_subdir(subdir, parent): return subdir == parent or subdir.startswith(parent+\"/\") from radiopadre_kernel", "is None: raise RuntimeError(\"radiopadre.init() must be called first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir,", "HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else \"INFO\") display(HTML(render_table(data, [\"\", \"\"], numbering=False))) show_status", "_init_js_side_done: print(\"init_js_side already done\") return _init_js_side_done = True try: get_ipython except: print(\"get_ipython not", "display(Javascript(\"document.radiopadre.protect('%s')\" % author)) # display(HTML(render_status_message(\"\"\"This notebook is now protected, author is \"%s\". #", "SLASH!!! 
def _strip_slash(path): return path if path == \"/\" or path is None", "'signature' in metadata: metadata['signature'] = \"\" # save nbformat.write(nbdata, open(newpath, 'w'), version) return", "astropy\") # NONE OF THE DIR NAMES ABOVE SHALL HAVE A TRALING SLASH!!!", "None # if we can write to the basedir, make a .radiopadre dir", "html += fitsfile.add_general_buttons() # get list of warnings and errors from init errors", "\"\"], numbering=False) display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width, window_width, window_height): if settings.display.auto_reset:", "nbdata.keys() # convert to current format current_version = nbformat.current_nbformat nbdata = nbformat.convert(nbdata, current_version)", "% copy_dirs if copy_root: code += \"\\n%s = %s[0]\" % (copy_root, copy_dirs) code", "print(\"init_js_side already done\") return _init_js_side_done = True try: get_ipython except: print(\"get_ipython not found\")", "automatically generated from ``%s`` using the 'copy notebook' feature. 
Please select \"Cell|Run all\"", "save nbformat.write(nbdata, open(newpath, 'w'), version) return newpath __init = False # print(\"importing radiopadre\")", "+= render_table(errors, [\"\", \"\"], numbering=False) display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width, window_width,", "display_log def get_cache_dir(path, subdir=None): \"\"\" Creates directory for caching radiopadre stuff associated with", "from IPython.display import display, HTML, Javascript from radiopadre_utils.notebook_utils import scrub_cell from radiopadre import", "outputs=[output])) # insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook was automatically generated from", "= None def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks that Javascript components of radiopadre", "debug else \"INFO\") display(HTML(render_table(data, [\"\", \"\"], numbering=False))) show_status = display_status show_log = display_log", "from .filelist import FileList from .fitsfile import FITSFile from .imagefile import ImageFile from", "worksheets, if available if hasattr(nbdata, 'worksheets'): raise (RuntimeError, \"copy_current_notebook: not compatible with worksheets\")", "a subdirectory off the root, this becomes the relative path to it, else", "styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html += f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\"", "js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC # get buttons from various modules if not NBCONVERT:", "-p {}\".format(shadowdir)) shadowdir += \"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir = shadowdir else: cachedir =", "TransientMessage, render_status_message, render_table # this stuff is setup by the 
kernel, pull from", "to access {}, which is outside the {} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl = CACHE_URL_ROOT", "set_window_sizes(cell_width, window_width, window_height): if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height = \\ cell_width, window_width, window_height", "various modules if not NBCONVERT: from . import fitsfile html += fitsfile.add_general_buttons() #", "\"\\n%s = %s[0]\" % (copy_root, copy_dirs) code += \"\\n%s.show()\" % copy_dirs # insert", "RuntimeError(\"Trying to access {}, which is outside the {} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl =", "\"\" # save nbformat.write(nbdata, open(newpath, 'w'), version) return newpath __init = False #", "import stuff from .file import autodetect_file_type from .datadir import DataDir, ls, lsR, lst,", "not os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache directory {} not user-writeable. Try removing it?\".format(cachedir)) #", "# read notebook data data = open(oldpath).read() version = json.loads(data)['nbformat'] nbdata = nbformat.reads(data,", "RuntimeError(\"Trying to view non-writeable directory, but access to the shadow tree is not", "init settings settings = settings_manager.RadiopadreSettingsManager() try: __version__ = pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__ =", "if os.access(basedir, os.W_OK): cachedir = basedir + \"/.radiopadre\" if not os.path.exists(cachedir): os.mkdir(cachedir) if", "= display_status show_log = display_log def get_cache_dir(path, subdir=None): \"\"\" Creates directory for caching", "to import astropy\") # NONE OF THE DIR NAMES ABOVE SHALL HAVE A", "buttons from various modules if not NBCONVERT: from . import fitsfile html +=", "is \"%s\". 
# All other users will have to treat this notebook as", "# display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This notebook is now unprotected. # All users can treat", "cell_width, window_width, window_height # def protect(author=None): # \"\"\"Makes current notebook protected with the", "nbformat.convert(nbdata, current_version) current_format = getattr(nbformat, 'v' + str(current_version)) # accommodate worksheets, if available", "raise (RuntimeError, \"copy_current_notebook: not compatible with worksheets\") metadata = nbdata['metadata'] cells = nbdata['cells']", "% (copy_root, copy_dirs) code += \"\\n%s.show()\" % copy_dirs # insert output output =", "in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT", "document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) # reload styles -- loaded from radiopadre-kernel.js already, but", "is the (shadow) filesystem location of the directory. 
The latter is the URL", "\"/\" + subdir if not os.path.exists(cachedir): os.mkdir(cachedir) return cachedir, cacheurl _init_js_side_done = None", "= radiopadre_kernel.log_handler.get_records('WARNING') if errors: html += render_table(errors, [\"\", \"\"], numbering=False) display(HTML(html)) def hide_cell_code(hide=True):", "metadata['radiopadre_notebook_scrub'] = 0 if 'signature' in metadata: metadata['signature'] = \"\" # save nbformat.write(nbdata,", "URL from .table import tabulate from .render import render_table, render_preamble, render_refresh_button, render_status_message, rich_string,", "this to reset sizes explicitly\") html = \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER'])", "raise RuntimeError(\"Cache directory {} not user-writeable. Try removing it?\".format(cachedir)) # make a cache", "\"copy_current_notebook: not compatible with worksheets\") metadata = nbdata['metadata'] cells = nbdata['cells'] # strip", "won't be saved # unless the user matches the author.\"\"\" # author =", "\"\"\" % (newpath, oldpath), )) # cleanup metadata metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub'] =", "os import pkg_resources import radiopadre_kernel from IPython.display import display, HTML, Javascript from radiopadre_utils.notebook_utils", "import pkg_resources import radiopadre_kernel from IPython.display import display, HTML, Javascript from radiopadre_utils.notebook_utils import", "not os.path.exists(cachedir): os.mkdir(cachedir) return cachedir, cacheurl _init_js_side_done = None def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def", "the menu to render this notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code, outputs=[output])) # insert markdown", "import casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre JS side\") # 
print(\"initializing radiopadre\") _init_js_side() __init = True", "# All other users will have to treat this notebook as read-only.\"\"\" %", "_init_js_side() __init = True # import stuff from .file import autodetect_file_type from .datadir", "<reponame>ratt-ru/radiopadre<filename>radiopadre/__init__.py<gh_stars>1-10 import json import nbformat import os import pkg_resources import radiopadre_kernel from IPython.display", "copy_dirs # insert output output = current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b style='color: red'>Please select", "os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir += \"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir = shadowdir else: cachedir", "html += render_table(errors, [\"\", \"\"], numbering=False) display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width,", "treat it as read-write.\"\"\"))) # def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'): # read", "notebook is now unprotected. 
# All users can treat it as read-write.\"\"\"))) #", "# make a cache subdir, if so required if subdir: cacheurl += \"/\"", "# display(Javascript(\"document.radiopadre.protect('%s')\" % author)) # display(HTML(render_status_message(\"\"\"This notebook is now protected, author is \"%s\".", ".file import autodetect_file_type from .datadir import DataDir, ls, lsR, lst, lsrt from .filelist", "# def protect(author=None): # \"\"\"Makes current notebook protected with the given author name.", "\"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) # reload styles -- loaded from radiopadre-kernel.js", "not compatible with worksheets\") metadata = nbdata['metadata'] cells = nbdata['cells'] # strip out", "cacheurl _init_js_side_done = None def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks that Javascript components", "is not set up. This is a bug.\") cachedir = shadowdir + \"/.radiopadre\"", "open(oldpath).read() version = json.loads(data)['nbformat'] nbdata = nbformat.reads(data, version) nbdata.keys() # convert to current", "insert boilerplate code code = \"import radiopadre\\n\" + \\ \"%s = radiopadre.DirList('.')\" %", "= display_log def get_cache_dir(path, subdir=None): \"\"\" Creates directory for caching radiopadre stuff associated", "with worksheets\") metadata = nbdata['metadata'] cells = nbdata['cells'] # strip out all cells", "# \"\"\"Makes current notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This notebook is now unprotected.", "display(HTML(render_status_message(\"\"\"This notebook is now unprotected. 
# All users can treat it as read-write.\"\"\")))", "\"development\" ## various notebook-related init try: import astropy astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed to", "CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data += [(\"\", \"startup log follows:\")] data +=", "metadata: metadata['signature'] = \"\" # save nbformat.write(nbdata, open(newpath, 'w'), version) return newpath __init", "= nbformat.current_nbformat nbdata = nbformat.convert(nbdata, current_version) current_format = getattr(nbformat, 'v' + str(current_version)) #", "shadowdir else: cachedir = None # if cachedir remains None, we weren't able", "import ImageFile from .casatable import CasaTable from .htmlfile import HTMLFile, URL from .table", "access to the shadow tree is not set up. This is a bug.\")", "import SESSION_ID, VERBOSE, HOSTNAME, \\ LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR,", "def set_window_sizes(cell_width, window_width, window_height): if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height = \\ cell_width, window_width,", "try: get_ipython except: print(\"get_ipython not found\") return None get_ipython().magic(\"matplotlib inline\") settings.display.reset = _display_reset,", "cachedir = None # if cachedir remains None, we weren't able to make", "def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks that Javascript components of radiopadre are initialized,", "not os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir)) if not os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache directory {}", "import settings_manager from radiopadre.render import render_error, show_exception, TransientMessage, render_status_message, render_table # this stuff", "fitsfile.add_general_buttons() # get 
list of warnings and errors from init errors = radiopadre_kernel.log_handler.get_records('WARNING')", "import os import pkg_resources import radiopadre_kernel from IPython.display import display, HTML, Javascript from", "_is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying to access {}, which is", "notebook was automatically generated from ``%s`` using the 'copy notebook' feature. Please select", "ABSROOTDIR)) cacheurl = CACHE_URL_ROOT + reldir + \"/.radiopadre\" shadowdir = SHADOW_HOME + basedir", "os.mkdir(cachedir) return cachedir, cacheurl _init_js_side_done = None def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks", "\"/.radiopadre\" shadowdir = SHADOW_HOME + basedir cachedir = None # if we can", "is None else path.rstrip(\"/\") def _is_subdir(subdir, parent): return subdir == parent or subdir.startswith(parent+\"/\")", "if not os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir, os.W_OK): if not os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir))", "{open(styles_file).read()} </style>\"\"\" html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import layouts html += layouts.init_html", "protected, author is \"%s\". 
# All other users will have to treat this", "%s\\nThis radiopadre notebook was automatically generated from ``%s`` using the 'copy notebook' feature.", "cells: scrub_cell(c) # insert boilerplate code code = \"import radiopadre\\n\" + \\ \"%s", "import nbformat import os import pkg_resources import radiopadre_kernel from IPython.display import display, HTML,", "import radiopadre_kernel from IPython.display import display, HTML, Javascript from radiopadre_utils.notebook_utils import scrub_cell from", "[\"\", \"\"], numbering=False) display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width, window_width, window_height): if", "_display_reset, settings_manager.DocString(\"call this to reset sizes explicitly\") html = \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings();", "\\ \"%s = radiopadre.DirList('.')\" % copy_dirs if copy_root: code += \"\\n%s = %s[0]\"", "root, this becomes the relative path to it, else \"\" reldir = basedir[len(ABSROOTDIR):]", "current notebook protected with the given author name. Protected notebooks won't be saved", "# scrub cell output for c in cells: scrub_cell(c) # insert boilerplate code", "if we can write to the basedir, make a .radiopadre dir within, and", "# unless the user matches the author.\"\"\" # author = author or os.environ['USER']", "import render_error, show_exception, TransientMessage, render_status_message, render_table # this stuff is setup by the", "current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b style='color: red'>Please select Cell|Run all from the menu to", "current notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This notebook is now unprotected. 
# All", "os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir += \"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir = shadowdir else:", "is the URL to this directory. \"\"\" if ABSROOTDIR is None: raise RuntimeError(\"radiopadre.init()", "lsrt from .filelist import FileList from .fitsfile import FITSFile from .imagefile import ImageFile", "remains None, we weren't able to make a writeable one in the main", "explicitly\") html = \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) # reload styles --", "the URL to this directory. \"\"\" if ABSROOTDIR is None: raise RuntimeError(\"radiopadre.init() must", "bomb out if cachedir is None: if not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to view", "render_error, show_exception, TransientMessage, render_status_message, render_table # this stuff is setup by the kernel,", "(RuntimeError, \"copy_current_notebook: not compatible with worksheets\") metadata = nbdata['metadata'] cells = nbdata['cells'] #", "this notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code, outputs=[output])) # insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre", "_strip_slash(path): return path if path == \"/\" or path is None else path.rstrip(\"/\")", "radiopadre_kernel.log_handler.get_records() from IPython.display import HTML display(HTML(render_table(data, [\"\", \"\"], numbering=False))) def display_log(debug=False): from IPython.display", "with the given file. Returns tuple of (real_path, url_path). 
The former is the", "as read-only.\"\"\" % author))) # # # def unprotect(): # \"\"\"Makes current notebook", "_init_js_side_done = True try: get_ipython except: print(\"get_ipython not found\") return None get_ipython().magic(\"matplotlib inline\")", "DataDir, ls, lsR, lst, lsrt from .filelist import FileList from .fitsfile import FITSFile", "_make_symlink def display_status(): # setup status data = [ (\"cwd\", os.getcwd()) ] for", "this fails, we're stuck, so may as well bomb out if cachedir is", "radiopadre_kernel.log_handler.get_records('WARNING') if errors: html += render_table(errors, [\"\", \"\"], numbering=False) display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not", "+ basedir cachedir = None # if we can write to the basedir,", "already, but reloading is useful for debugging styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html +=", "return _init_js_side_done = True try: get_ipython except: print(\"get_ipython not found\") return None get_ipython().magic(\"matplotlib", "settings.display.window_width, settings.display.window_height = \\ cell_width, window_width, window_height # def protect(author=None): # \"\"\"Makes current", "Cell|Run all from the menu to render this notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code, outputs=[output]))", "is now unprotected. # All users can treat it as read-write.\"\"\"))) # def", "}) cells.insert(0, current_format.new_code_cell(code, outputs=[output])) # insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook was", "so required if subdir: cacheurl += \"/\" + subdir cachedir += \"/\" +", "of (real_path, url_path). 
The former is the (shadow) filesystem location of the directory.", "subdir cachedir += \"/\" + subdir if not os.path.exists(cachedir): os.mkdir(cachedir) return cachedir, cacheurl", "show_status = display_status show_log = display_log def get_cache_dir(path, subdir=None): \"\"\" Creates directory for", "debugging styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html += f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html += \"\"\"<DIV", "unprotected. # All users can treat it as read-write.\"\"\"))) # def copy_current_notebook(oldpath, newpath,", "metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub'] = 0 if 'signature' in metadata: metadata['signature'] = \"\"", "notebook' feature. Please select \"Cell|Run all\" from the menu to render this notebook.", "reldir + \"/.radiopadre\" shadowdir = SHADOW_HOME + basedir cachedir = None # if", "% author)) # display(HTML(render_status_message(\"\"\"This notebook is now protected, author is \"%s\". # All", "# setup status data = [ (\"cwd\", os.getcwd()) ] for varname in \"\"\"SESSION_ID", "if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height = \\ cell_width, window_width, window_height # def protect(author=None):", "of radiopadre are initialized, does various other init\"\"\" global _init_js_side_done if _init_js_side_done: print(\"init_js_side", "+= \"/\" + subdir if not os.path.exists(cachedir): os.mkdir(cachedir) return cachedir, cacheurl _init_js_side_done =", "0 metadata['radiopadre_notebook_scrub'] = 0 if 'signature' in metadata: metadata['signature'] = \"\" # save", "NBCONVERT # init settings settings = settings_manager.RadiopadreSettingsManager() try: __version__ = pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound:", "is now protected, author is \"%s\". 
# All other users will have to", "= nbdata['cells'] # strip out all cells up to and including indicated one", "% copy_dirs # insert output output = current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b style='color: red'>Please", "# display(HTML(render_status_message(\"\"\"This notebook is now protected, author is \"%s\". # All other users", "can treat it as read-write.\"\"\"))) # def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'): #", "# strip out all cells up to and including indicated one del cells[:cell", "# accommodate worksheets, if available if hasattr(nbdata, 'worksheets'): raise (RuntimeError, \"copy_current_notebook: not compatible", "filesystem location of the directory. The latter is the URL to this directory.", "return path if path == \"/\" or path is None else path.rstrip(\"/\") def", "but access to the shadow tree is not set up. This is a", "\\ LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT,", "def display_log(debug=False): from IPython.display import HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else \"INFO\")", "stuff associated with the given file. Returns tuple of (real_path, url_path). The former", "\"\\n%s.show()\" % copy_dirs # insert output output = current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b style='color:", "scrub_cell from radiopadre import settings_manager from radiopadre.render import render_error, show_exception, TransientMessage, render_status_message, render_table", "'v' + str(current_version)) # accommodate worksheets, if available if hasattr(nbdata, 'worksheets'): raise (RuntimeError,", "associated with the given file. Returns tuple of (real_path, url_path). 
The former is", "# # # def unprotect(): # \"\"\"Makes current notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) #", "writeable one in the main tree -- use shadow tree # if this", "tree -- use shadow tree # if this fails, we're stuck, so may", "get_cache_dir(path, subdir=None): \"\"\" Creates directory for caching radiopadre stuff associated with the given", "settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height = \\ cell_width, window_width, window_height # def protect(author=None): #", "SHALL HAVE A TRALING SLASH!!! def _strip_slash(path): return path if path == \"/\"", "ABSROOTDIR is None: raise RuntimeError(\"radiopadre.init() must be called first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if", "# \"\"\"Makes current notebook protected with the given author name. Protected notebooks won't", "treat this notebook as read-only.\"\"\" % author))) # # # def unprotect(): #", "from the menu to render this notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code, outputs=[output])) # insert", "None: raise RuntimeError(\"radiopadre.init() must be called first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR):", "radiopadre_utils.notebook_utils import scrub_cell from radiopadre import settings_manager from radiopadre.render import render_error, show_exception, TransientMessage,", "it, else \"\" reldir = basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else:", "nbformat.write(nbdata, open(newpath, 'w'), version) return newpath __init = False # print(\"importing radiopadre\") if", "# init settings settings = settings_manager.RadiopadreSettingsManager() try: __version__ = pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__", "SESSION_DIR 
SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data += [(\"\", \"startup log follows:\")] data += radiopadre_kernel.log_handler.get_records()", "follows:\")] data += radiopadre_kernel.log_handler.get_records() from IPython.display import HTML display(HTML(render_table(data, [\"\", \"\"], numbering=False))) def", "shadowdir = SHADOW_HOME + basedir cachedir = None # if we can write", "basedir cachedir = None # if we can write to the basedir, make", "init errors = radiopadre_kernel.log_handler.get_records('WARNING') if errors: html += render_table(errors, [\"\", \"\"], numbering=False) display(HTML(html))", ".table import tabulate from .render import render_table, render_preamble, render_refresh_button, render_status_message, rich_string, render_url, render_title", "SHADOW_HOME + basedir cachedir = None # if we can write to the", "= None # if cachedir remains None, we weren't able to make a", "+ \"/.radiopadre\" if not os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir)) if not os.access(cachedir, os.W_OK): raise", "os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir, os.W_OK): if not os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir +=", "notebook is now protected, author is \"%s\". 
# All other users will have", "astropy astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed to import astropy\") # NONE OF THE DIR", "except pkg_resources.DistributionNotFound: __version__ = \"development\" ## various notebook-related init try: import astropy astropy.log.setLevel('ERROR')", "open(newpath, 'w'), version) return newpath __init = False # print(\"importing radiopadre\") if not", "os.W_OK): cachedir = basedir + \"/.radiopadre\" if not os.path.exists(cachedir): os.mkdir(cachedir) if os.access(cachedir, os.W_OK):", "display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width, window_width, window_height): if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height = \\", "NONE OF THE DIR NAMES ABOVE SHALL HAVE A TRALING SLASH!!! def _strip_slash(path):", "# if we can write to the basedir, make a .radiopadre dir within,", "directory. The latter is the URL to this directory. 
\"\"\" if ABSROOTDIR is", "display, HTML, Javascript from radiopadre_utils.notebook_utils import scrub_cell from radiopadre import settings_manager from radiopadre.render", "various notebook-related init try: import astropy astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed to import astropy\")", "SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data += [(\"\", \"startup log follows:\")] data += radiopadre_kernel.log_handler.get_records() from", "(newpath, oldpath), )) # cleanup metadata metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub'] = 0 if", "if not os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir += \"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir =", "nbdata = nbformat.reads(data, version) nbdata.keys() # convert to current format current_version = nbformat.current_nbformat", "= CACHE_URL_ROOT + reldir + \"/.radiopadre\" shadowdir = SHADOW_HOME + basedir cachedir =", "if _is_subdir(basedir, ABSROOTDIR): # if in a subdirectory off the root, this becomes", "read notebook data data = open(oldpath).read() version = json.loads(data)['nbformat'] nbdata = nbformat.reads(data, version)", "author.\"\"\" # author = author or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" % author)) # display(HTML(render_status_message(\"\"\"This", "settings = settings_manager.RadiopadreSettingsManager() try: __version__ = pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__ = \"development\" ##", "not os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir += \"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir = shadowdir", "html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import layouts html += layouts.init_html from radiopadre_kernel", "for caching radiopadre stuff associated with the 
given file. Returns tuple of (real_path,", "Javascript components of radiopadre are initialized, does various other init\"\"\" global _init_js_side_done if", "from the menu to render this notebook. \"\"\" % (newpath, oldpath), )) #", "to reset sizes explicitly\") html = \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) #", "version) nbdata.keys() # convert to current format current_version = nbformat.current_nbformat nbdata = nbformat.convert(nbdata,", "reload styles -- loaded from radiopadre-kernel.js already, but reloading is useful for debugging", "warnings and errors from init errors = radiopadre_kernel.log_handler.get_records('WARNING') if errors: html += render_table(errors,", "from it from radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME, \\ LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR,", "</script> \"\"\".format(os.environ['USER']) # reload styles -- loaded from radiopadre-kernel.js already, but reloading is", "copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'): # read notebook data data = open(oldpath).read() version", "FITSFile from .imagefile import ImageFile from .casatable import CasaTable from .htmlfile import HTMLFile,", "html += layouts.init_html from radiopadre_kernel import js9 if not js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC", "HTML, Javascript from radiopadre_utils.notebook_utils import scrub_cell from radiopadre import settings_manager from radiopadre.render import", "{}, which is outside the {} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl = CACHE_URL_ROOT + reldir", "fitsfile html += fitsfile.add_general_buttons() # get list of warnings and errors from init", "'worksheets'): raise (RuntimeError, \"copy_current_notebook: not compatible with worksheets\") metadata = nbdata['metadata'] cells =", "os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html += 
f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre", "output for c in cells: scrub_cell(c) # insert boilerplate code code = \"import", "THE DIR NAMES ABOVE SHALL HAVE A TRALING SLASH!!! def _strip_slash(path): return path", "from radiopadre_kernel import _make_symlink def display_status(): # setup status data = [ (\"cwd\",", "output = current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b style='color: red'>Please select Cell|Run all from the", "= [ (\"cwd\", os.getcwd()) ] for varname in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME", "Returns tuple of (real_path, url_path). The former is the (shadow) filesystem location of", "ABOVE SHALL HAVE A TRALING SLASH!!! def _strip_slash(path): return path if path ==", "scrub cell output for c in cells: scrub_cell(c) # insert boilerplate code code", "from .table import tabulate from .render import render_table, render_preamble, render_refresh_button, render_status_message, rich_string, render_url,", "one in the main tree -- use shadow tree # if this fails,", "it from radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME, \\ LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME,", "numbering=False))) def display_log(debug=False): from IPython.display import HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else", "import display, HTML, Javascript from radiopadre_utils.notebook_utils import scrub_cell from radiopadre import settings_manager from", "be called first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR): # if in a", "former is the (shadow) filesystem location of the directory. 
The latter is the", "display(HTML(render_table(data, [\"\", \"\"], numbering=False))) show_status = display_status show_log = display_log def get_cache_dir(path, subdir=None):", "show_log = display_log def get_cache_dir(path, subdir=None): \"\"\" Creates directory for caching radiopadre stuff", "# NONE OF THE DIR NAMES ABOVE SHALL HAVE A TRALING SLASH!!! def", "radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME, \\ LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\", "</style>\"\"\" html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import layouts html += layouts.init_html from", "All users can treat it as read-write.\"\"\"))) # def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs',", "get_ipython().magic(\"matplotlib inline\") settings.display.reset = _display_reset, settings_manager.DocString(\"call this to reset sizes explicitly\") html =", "copy_dirs='dirs', copy_root='root'): # read notebook data data = open(oldpath).read() version = json.loads(data)['nbformat'] nbdata", "FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL, NBCONVERT # init settings settings =", "have to treat this notebook as read-only.\"\"\" % author))) # # # def", "from ``%s`` using the 'copy notebook' feature. 
Please select \"Cell|Run all\" from the", ".filelist import FileList from .fitsfile import FITSFile from .imagefile import ImageFile from .casatable", ".htmlfile import HTMLFile, URL from .table import tabulate from .render import render_table, render_preamble,", "and errors from init errors = radiopadre_kernel.log_handler.get_records('WARNING') if errors: html += render_table(errors, [\"\",", "= json.loads(data)['nbformat'] nbdata = nbformat.reads(data, version) nbdata.keys() # convert to current format current_version", "up to and including indicated one del cells[:cell + 1] # scrub cell", "(\"cwd\", os.getcwd()) ] for varname in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR", "code += \"\\n%s = %s[0]\" % (copy_root, copy_dirs) code += \"\\n%s.show()\" % copy_dirs", "as well bomb out if cachedir is None: if not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying", "if ABSROOTDIR is None: raise RuntimeError(\"radiopadre.init() must be called first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path)))", "radiopadre\\n\" + \\ \"%s = radiopadre.DirList('.')\" % copy_dirs if copy_root: code += \"\\n%s", ".imagefile import ImageFile from .casatable import CasaTable from .htmlfile import HTMLFile, URL from", "type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) # reload styles -- loaded from radiopadre-kernel.js already,", "= None # if we can write to the basedir, make a .radiopadre", "data data = open(oldpath).read() version = json.loads(data)['nbformat'] nbdata = nbformat.reads(data, version) nbdata.keys() #", "\"/\" or path is None else path.rstrip(\"/\") def _is_subdir(subdir, parent): return subdir ==", "will have to treat this notebook as read-only.\"\"\" % author))) # # #", "settings.display.cell_width, settings.display.window_width, settings.display.window_height = \\ cell_width, window_width, window_height 
# def protect(author=None): # \"\"\"Makes", "settings_manager.RadiopadreSettingsManager() try: __version__ = pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__ = \"development\" ## various notebook-related", "if not js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC # get buttons from various modules if", "compatible with worksheets\") metadata = nbdata['metadata'] cells = nbdata['cells'] # strip out all", "menu to render this notebook. \"\"\" % (newpath, oldpath), )) # cleanup metadata", "in metadata: metadata['signature'] = \"\" # save nbformat.write(nbdata, open(newpath, 'w'), version) return newpath", "\"\"\"Makes current notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This notebook is now unprotected. #", "_init_js_side_done = None def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks that Javascript components of", "+= [(\"\", \"startup log follows:\")] data += radiopadre_kernel.log_handler.get_records() from IPython.display import HTML display(HTML(render_table(data,", "if not os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache directory {} not user-writeable. Try removing it?\".format(cachedir))", "is setup by the kernel, pull from it from radiopadre_kernel import SESSION_ID, VERBOSE,", "display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks that Javascript components of radiopadre are initialized, does various", "to render this notebook. 
\"\"\" % (newpath, oldpath), )) # cleanup metadata metadata['radiopadre_notebook_protect']", "IPython.display import HTML display(HTML(render_table(data, [\"\", \"\"], numbering=False))) def display_log(debug=False): from IPython.display import HTML", "to the basedir, make a .radiopadre dir within, and make a symlink to", "os.access(cachedir, os.W_OK): if not os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir += \"/.radiopadre\" _make_symlink(cachedir, shadowdir)", "ImportError: radiopadre_kernel.log.warning(\"Failed to import astropy\") # NONE OF THE DIR NAMES ABOVE SHALL", "os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" % author)) # display(HTML(render_status_message(\"\"\"This notebook is now protected, author is", "version = json.loads(data)['nbformat'] nbdata = nbformat.reads(data, version) nbdata.keys() # convert to current format", "from radiopadre_utils.notebook_utils import scrub_cell from radiopadre import settings_manager from radiopadre.render import render_error, show_exception,", "pkg_resources.DistributionNotFound: __version__ = \"development\" ## various notebook-related init try: import astropy astropy.log.setLevel('ERROR') except", "SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data += [(\"\",", "current format current_version = nbformat.current_nbformat nbdata = nbformat.convert(nbdata, current_version) current_format = getattr(nbformat, 'v'", "# save nbformat.write(nbdata, open(newpath, 'w'), version) return newpath __init = False # print(\"importing", "_init_js_side(): \"\"\"Checks that Javascript components of radiopadre are initialized, does various other init\"\"\"", "radiopadre stuff associated with the given file. Returns tuple of (real_path, url_path). 
The", "SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data += [(\"\", \"startup log", "this stuff is setup by the kernel, pull from it from radiopadre_kernel import", "from IPython.display import HTML display(HTML(render_table(data, [\"\", \"\"], numbering=False))) def display_log(debug=False): from IPython.display import", "the root, this becomes the relative path to it, else \"\" reldir =", "removing it?\".format(cachedir)) # make a cache subdir, if so required if subdir: cacheurl", "copy_root='root'): # read notebook data data = open(oldpath).read() version = json.loads(data)['nbformat'] nbdata =", "+ \\ \"%s = radiopadre.DirList('.')\" % copy_dirs if copy_root: code += \"\\n%s =", "generated from ``%s`` using the 'copy notebook' feature. Please select \"Cell|Run all\" from", "oldpath), )) # cleanup metadata metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub'] = 0 if 'signature'", "newpath __init = False # print(\"importing radiopadre\") if not __init: from radiopadre_kernel import", "shadowdir) cachedir = shadowdir else: cachedir = None # if cachedir remains None,", "html += js9.JS9_INIT_HTML_DYNAMIC # get buttons from various modules if not NBCONVERT: from", "user matches the author.\"\"\" # author = author or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" %", "hide)});\")) def set_window_sizes(cell_width, window_width, window_height): if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height = \\ cell_width,", "= radiopadre.DirList('.')\" % copy_dirs if copy_root: code += \"\\n%s = %s[0]\" % (copy_root,", "markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook was automatically generated from ``%s`` using the", "unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) # 
display(HTML(render_status_message(\"\"\"This notebook is now unprotected. # All users can", "SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL,", "\"\"\" Creates directory for caching radiopadre stuff associated with the given file. Returns", "now protected, author is \"%s\". # All other users will have to treat", "_display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks that Javascript components of radiopadre are initialized, does", "radiopadre are initialized, does various other init\"\"\" global _init_js_side_done if _init_js_side_done: print(\"init_js_side already", "the given file. Returns tuple of (real_path, url_path). The former is the (shadow)", "True try: get_ipython except: print(\"get_ipython not found\") return None get_ipython().magic(\"matplotlib inline\") settings.display.reset =", "SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL, NBCONVERT # init settings", "the main tree -- use shadow tree # if this fails, we're stuck,", "-- loaded from radiopadre-kernel.js already, but reloading is useful for debugging styles_file =", "import astropy\") # NONE OF THE DIR NAMES ABOVE SHALL HAVE A TRALING", "in the shadow tree. 
if os.access(basedir, os.W_OK): cachedir = basedir + \"/.radiopadre\" if", "= os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html += f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from", "= pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__ = \"development\" ## various notebook-related init try: import", "+= js9.JS9_INIT_HTML_DYNAMIC # get buttons from various modules if not NBCONVERT: from .", "import fitsfile html += fitsfile.add_general_buttons() # get list of warnings and errors from", "all cells up to and including indicated one del cells[:cell + 1] #", "% author))) # # # def unprotect(): # \"\"\"Makes current notebook unprotected.\"\"\" #", "[ (\"cwd\", os.getcwd()) ] for varname in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR", "required if subdir: cacheurl += \"/\" + subdir cachedir += \"/\" + subdir", "+ \"/.radiopadre\" shadowdir = SHADOW_HOME + basedir cachedir = None # if we", "in a subdirectory off the root, this becomes the relative path to it,", "current_version) current_format = getattr(nbformat, 'v' + str(current_version)) # accommodate worksheets, if available if", "lst, lsrt from .filelist import FileList from .fitsfile import FITSFile from .imagefile import", "{} hierarchy\".format(basedir, ABSROOTDIR)) cacheurl = CACHE_URL_ROOT + reldir + \"/.radiopadre\" shadowdir = SHADOW_HOME", "+= \"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir = shadowdir else: cachedir = None # if", "data += [(\"\", \"startup log follows:\")] data += radiopadre_kernel.log_handler.get_records() from IPython.display import HTML", "def get_cache_dir(path, subdir=None): \"\"\" Creates directory for caching radiopadre stuff associated with the", "tuple of (real_path, url_path). 
The former is the (shadow) filesystem location of the", "The latter is the URL to this directory. \"\"\" if ABSROOTDIR is None:", "= settings_manager.RadiopadreSettingsManager() try: __version__ = pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__ = \"development\" ## various", "try: __version__ = pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__ = \"development\" ## various notebook-related init", "path to it, else \"\" reldir = basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir =", "reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying to access {}, which is outside the", "not user-writeable. Try removing it?\".format(cachedir)) # make a cache subdir, if so required", "\"html/radiopadre.css\") html += f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import", "[\"<b style='color: red'>Please select Cell|Run all from the menu to render this notebook.</b>\"]", "data = [ (\"cwd\", os.getcwd()) ] for varname in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR", "] for varname in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX", "if debug else \"INFO\") display(HTML(render_table(data, [\"\", \"\"], numbering=False))) show_status = display_status show_log =", "= shadowdir + \"/.radiopadre\" if not os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir)) if not os.access(cachedir,", "protected with the given author name. Protected notebooks won't be saved # unless", "c in cells: scrub_cell(c) # insert boilerplate code code = \"import radiopadre\\n\" +", "raise RuntimeError(\"Trying to view non-writeable directory, but access to the shadow tree is", "dir within, and make a symlink to it in the shadow tree. 
if", "from various modules if not NBCONVERT: from . import fitsfile html += fitsfile.add_general_buttons()", "author)) # display(HTML(render_status_message(\"\"\"This notebook is now protected, author is \"%s\". # All other", "cells = nbdata['cells'] # strip out all cells up to and including indicated", "or path is None else path.rstrip(\"/\") def _is_subdir(subdir, parent): return subdir == parent", "This is a bug.\") cachedir = shadowdir + \"/.radiopadre\" if not os.path.exists(cachedir): os.system(\"mkdir", "cachedir = shadowdir + \"/.radiopadre\" if not os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir)) if not", "the given author name. Protected notebooks won't be saved # unless the user", "\"\"\".format(os.environ['USER']) # reload styles -- loaded from radiopadre-kernel.js already, but reloading is useful", "ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname,", "= \"\" # save nbformat.write(nbdata, open(newpath, 'w'), version) return newpath __init = False", "JS side\") # print(\"initializing radiopadre\") _init_js_side() __init = True # import stuff from", "copy_dirs) code += \"\\n%s.show()\" % copy_dirs # insert output output = current_format.new_output(\"display_data\", data={", "autodetect_file_type from .datadir import DataDir, ls, lsR, lst, lsrt from .filelist import FileList", "os.mkdir(cachedir) if os.access(cachedir, os.W_OK): if not os.path.exists(shadowdir): os.system(\"mkdir -p {}\".format(shadowdir)) shadowdir += \"/.radiopadre\"", "other users will have to treat this notebook as read-only.\"\"\" % author))) #", "+ 1] # scrub cell output for c in cells: scrub_cell(c) # insert", "the (shadow) filesystem location of the directory. 
The latter is the URL to", "fails, we're stuck, so may as well bomb out if cachedir is None:", "# print(\"importing radiopadre\") if not __init: from radiopadre_kernel import casacore_tables radiopadre_kernel.log.info(\"initializing radiopadre JS", "unless the user matches the author.\"\"\" # author = author or os.environ['USER'] #", "elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying to access {}, which", "given author name. Protected notebooks won't be saved # unless the user matches", "[\"\", \"\"], numbering=False))) def display_log(debug=False): from IPython.display import HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if", "a bug.\") cachedir = shadowdir + \"/.radiopadre\" if not os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir))", "sizes explicitly\") html = \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) # reload styles", "display_status show_log = display_log def get_cache_dir(path, subdir=None): \"\"\" Creates directory for caching radiopadre", "tree is not set up. This is a bug.\") cachedir = shadowdir +", "given file. Returns tuple of (real_path, url_path). 
The former is the (shadow) filesystem", "ls, lsR, lst, lsrt from .filelist import FileList from .fitsfile import FITSFile from", "def display_status(): # setup status data = [ (\"cwd\", os.getcwd()) ] for varname", "code code = \"import radiopadre\\n\" + \\ \"%s = radiopadre.DirList('.')\" % copy_dirs if", "if so required if subdir: cacheurl += \"/\" + subdir cachedir += \"/\"", "\"\"\"Checks that Javascript components of radiopadre are initialized, does various other init\"\"\" global", "able to make a writeable one in the main tree -- use shadow", "path is None else path.rstrip(\"/\") def _is_subdir(subdir, parent): return subdir == parent or", "this becomes the relative path to it, else \"\" reldir = basedir[len(ABSROOTDIR):] elif", "def unprotect(): # \"\"\"Makes current notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This notebook is", "list of warnings and errors from init errors = radiopadre_kernel.log_handler.get_records('WARNING') if errors: html", "global _init_js_side_done if _init_js_side_done: print(\"init_js_side already done\") return _init_js_side_done = True try: get_ipython", "radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else \"INFO\") display(HTML(render_table(data, [\"\", \"\"], numbering=False))) show_status = display_status show_log", "ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE,", "the shadow tree. if os.access(basedir, os.W_OK): cachedir = basedir + \"/.radiopadre\" if not", "protect(author=None): # \"\"\"Makes current notebook protected with the given author name. 
Protected notebooks", "the relative path to it, else \"\" reldir = basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR):", "del cells[:cell + 1] # scrub cell output for c in cells: scrub_cell(c)", "# insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook was automatically generated from ``%s``", "# if this fails, we're stuck, so may as well bomb out if", "# reload styles -- loaded from radiopadre-kernel.js already, but reloading is useful for", "if _init_js_side_done: print(\"init_js_side already done\") return _init_js_side_done = True try: get_ipython except: print(\"get_ipython", "copy_root: code += \"\\n%s = %s[0]\" % (copy_root, copy_dirs) code += \"\\n%s.show()\" %", "else \"INFO\") display(HTML(render_table(data, [\"\", \"\"], numbering=False))) show_status = display_status show_log = display_log def", "so may as well bomb out if cachedir is None: if not SHADOW_URL_PREFIX:", "convert to current format current_version = nbformat.current_nbformat nbdata = nbformat.convert(nbdata, current_version) current_format =", "use shadow tree # if this fails, we're stuck, so may as well", "to this directory. \"\"\" if ABSROOTDIR is None: raise RuntimeError(\"radiopadre.init() must be called", "feature. Please select \"Cell|Run all\" from the menu to render this notebook. \"\"\"", "ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split():", "available if hasattr(nbdata, 'worksheets'): raise (RuntimeError, \"copy_current_notebook: not compatible with worksheets\") metadata =", "radiopadre notebook was automatically generated from ``%s`` using the 'copy notebook' feature. 
Please", "radiopadre import settings_manager from radiopadre.render import render_error, show_exception, TransientMessage, render_status_message, render_table # this", "from radiopadre_kernel import js9 if not js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC # get buttons", "it as read-write.\"\"\"))) # def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'): # read notebook", "the user matches the author.\"\"\" # author = author or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\"", "of the directory. The latter is the URL to this directory. \"\"\" if", "{} not user-writeable. Try removing it?\".format(cachedir)) # make a cache subdir, if so", "cachedir, cacheurl _init_js_side_done = None def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks that Javascript", "\"INFO\") display(HTML(render_table(data, [\"\", \"\"], numbering=False))) show_status = display_status show_log = display_log def get_cache_dir(path,", "CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL, NBCONVERT # init settings settings = settings_manager.RadiopadreSettingsManager() try:", "bug.\") cachedir = shadowdir + \"/.radiopadre\" if not os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir)) if", "def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width, window_width, window_height): if settings.display.auto_reset: settings.display.cell_width, settings.display.window_width, settings.display.window_height", "if 'signature' in metadata: metadata['signature'] = \"\" # save nbformat.write(nbdata, open(newpath, 'w'), version)", "globals()[varname])) data += [(\"\", \"startup log follows:\")] data += radiopadre_kernel.log_handler.get_records() from IPython.display import", "radiopadre_kernel import js9 if not js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC # get 
buttons from", "## various notebook-related init try: import astropy astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed to import", "# get buttons from various modules if not NBCONVERT: from . import fitsfile", "including indicated one del cells[:cell + 1] # scrub cell output for c", "\"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR", "subdir.startswith(parent+\"/\") from radiopadre_kernel import _make_symlink def display_status(): # setup status data = [", "VERBOSE, HOSTNAME, \\ LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX,", "to current format current_version = nbformat.current_nbformat nbdata = nbformat.convert(nbdata, current_version) current_format = getattr(nbformat,", "(shadow) filesystem location of the directory. The latter is the URL to this", "__init = True # import stuff from .file import autodetect_file_type from .datadir import", "import HTMLFile, URL from .table import tabulate from .render import render_table, render_preamble, render_refresh_button,", "+= f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import layouts html", "= SHADOW_HOME + basedir cachedir = None # if we can write to", "main tree -- use shadow tree # if this fails, we're stuck, so", "= shadowdir else: cachedir = None # if cachedir remains None, we weren't", "{}\".format(cachedir)) if not os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache directory {} not user-writeable. Try removing", "\\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL, NBCONVERT # init settings settings", "latter is the URL to this directory. 
\"\"\" if ABSROOTDIR is None: raise", "setup by the kernel, pull from it from radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME,", "if not os.path.exists(cachedir): os.system(\"mkdir -p {}\".format(cachedir)) if not os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache directory", "settings_manager from radiopadre.render import render_error, show_exception, TransientMessage, render_status_message, render_table # this stuff is", "_make_symlink(cachedir, shadowdir) cachedir = shadowdir else: cachedir = None # if cachedir remains", "get_ipython except: print(\"get_ipython not found\") return None get_ipython().magic(\"matplotlib inline\") settings.display.reset = _display_reset, settings_manager.DocString(\"call", "nbdata['metadata'] cells = nbdata['cells'] # strip out all cells up to and including", "is None: if not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to view non-writeable directory, but access", "set up. This is a bug.\") cachedir = shadowdir + \"/.radiopadre\" if not", "+ reldir + \"/.radiopadre\" shadowdir = SHADOW_HOME + basedir cachedir = None #", "# import stuff from .file import autodetect_file_type from .datadir import DataDir, ls, lsR,", "FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data += [(\"\", \"startup log follows:\")]", "for varname in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT", "settings settings = settings_manager.RadiopadreSettingsManager() try: __version__ = pkg_resources.require(\"radiopadre\")[0].version except pkg_resources.DistributionNotFound: __version__ = \"development\"", "cell=0, copy_dirs='dirs', copy_root='root'): # read notebook data data = open(oldpath).read() version = json.loads(data)['nbformat']", "scrub_cell(c) # insert boilerplate code code = \"import radiopadre\\n\" + \\ \"%s =", "radiopadre_kernel.log.warning(\"Failed to import 
astropy\") # NONE OF THE DIR NAMES ABOVE SHALL HAVE", "\"\"\"Makes current notebook protected with the given author name. Protected notebooks won't be", "with the given author name. Protected notebooks won't be saved # unless the", "= nbformat.convert(nbdata, current_version) current_format = getattr(nbformat, 'v' + str(current_version)) # accommodate worksheets, if", "# if in a subdirectory off the root, this becomes the relative path", "we can write to the basedir, make a .radiopadre dir within, and make", "cells up to and including indicated one del cells[:cell + 1] # scrub", "+= \"\\n%s = %s[0]\" % (copy_root, copy_dirs) code += \"\\n%s.show()\" % copy_dirs #", "radiopadre_kernel.log.info(\"initializing radiopadre JS side\") # print(\"initializing radiopadre\") _init_js_side() __init = True # import", "subdir: cacheurl += \"/\" + subdir cachedir += \"/\" + subdir if not", "from .htmlfile import HTMLFile, URL from .table import tabulate from .render import render_table,", "-- use shadow tree # if this fails, we're stuck, so may as", "f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html += \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import layouts html +=", "called first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR): # if in a subdirectory", "= True try: get_ipython except: print(\"get_ipython not found\") return None get_ipython().magic(\"matplotlib inline\") settings.display.reset", "was automatically generated from ``%s`` using the 'copy notebook' feature. Please select \"Cell|Run", "stuck, so may as well bomb out if cachedir is None: if not", "to the shadow tree is not set up. This is a bug.\") cachedir", "NBCONVERT: from . import fitsfile html += fitsfile.add_general_buttons() # get list of warnings", "file. Returns tuple of (real_path, url_path). 
The former is the (shadow) filesystem location", "relative path to it, else \"\" reldir = basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir", "it?\".format(cachedir)) # make a cache subdir, if so required if subdir: cacheurl +=", "import HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else \"INFO\") display(HTML(render_table(data, [\"\", \"\"], numbering=False)))", "OF THE DIR NAMES ABOVE SHALL HAVE A TRALING SLASH!!! def _strip_slash(path): return", "if hasattr(nbdata, 'worksheets'): raise (RuntimeError, \"copy_current_notebook: not compatible with worksheets\") metadata = nbdata['metadata']", "Creates directory for caching radiopadre stuff associated with the given file. Returns tuple", "this directory. \"\"\" if ABSROOTDIR is None: raise RuntimeError(\"radiopadre.init() must be called first\")", "from radiopadre-kernel.js already, but reloading is useful for debugging styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\")", "path if path == \"/\" or path is None else path.rstrip(\"/\") def _is_subdir(subdir,", "one del cells[:cell + 1] # scrub cell output for c in cells:", "Javascript from radiopadre_utils.notebook_utils import scrub_cell from radiopadre import settings_manager from radiopadre.render import render_error,", "= True # import stuff from .file import autodetect_file_type from .datadir import DataDir,", "LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT,", "loaded from radiopadre-kernel.js already, but reloading is useful for debugging styles_file = os.path.join(os.path.dirname(__file__),", "for debugging styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html += f\"\"\"<style> {open(styles_file).read()} </style>\"\"\" html +=", "document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) # reload 
styles -- loaded from radiopadre-kernel.js already, but reloading", "cachedir is None: if not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to view non-writeable directory, but", "newpath, cell=0, copy_dirs='dirs', copy_root='root'): # read notebook data data = open(oldpath).read() version =", "data={ \"text/html\": [\"<b style='color: red'>Please select Cell|Run all from the menu to render", "subdir, if so required if subdir: cacheurl += \"/\" + subdir cachedir +=", "ImageFile from .casatable import CasaTable from .htmlfile import HTMLFile, URL from .table import", "by the kernel, pull from it from radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME, \\", "[(\"\", \"startup log follows:\")] data += radiopadre_kernel.log_handler.get_records() from IPython.display import HTML display(HTML(render_table(data, [\"\",", "unprotect(): # \"\"\"Makes current notebook unprotected.\"\"\" # display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This notebook is now", "# convert to current format current_version = nbformat.current_nbformat nbdata = nbformat.convert(nbdata, current_version) current_format", "+= radiopadre_kernel.log_handler.get_records() from IPython.display import HTML display(HTML(render_table(data, [\"\", \"\"], numbering=False))) def display_log(debug=False): from", "first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR): # if in a subdirectory off", "os.access(cachedir, os.W_OK): raise RuntimeError(\"Cache directory {} not user-writeable. 
Try removing it?\".format(cachedir)) # make", "cells.insert(0, current_format.new_code_cell(code, outputs=[output])) # insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"# %s\\nThis radiopadre notebook was automatically", "\"%s = radiopadre.DirList('.')\" % copy_dirs if copy_root: code += \"\\n%s = %s[0]\" %", "def protect(author=None): # \"\"\"Makes current notebook protected with the given author name. Protected", "= _display_reset, settings_manager.DocString(\"call this to reset sizes explicitly\") html = \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}');", "notebook. \"\"\" % (newpath, oldpath), )) # cleanup metadata metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub']", "\"text/html\": [\"<b style='color: red'>Please select Cell|Run all from the menu to render this", "all from the menu to render this notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code, outputs=[output])) #", "errors = radiopadre_kernel.log_handler.get_records('WARNING') if errors: html += render_table(errors, [\"\", \"\"], numbering=False) display(HTML(html)) def", "reldir = basedir[len(ABSROOTDIR):] elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR): reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):] else: raise RuntimeError(\"Trying to", "from .imagefile import ImageFile from .casatable import CasaTable from .htmlfile import HTMLFile, URL", "show_exception, TransientMessage, render_status_message, render_table # this stuff is setup by the kernel, pull", "to render this notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code, outputs=[output])) # insert markdown cells.insert(0, current_format.new_markdown_cell(\"\"\"#", "url_path). The former is the (shadow) filesystem location of the directory. The latter", "from IPython.display import HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\" if debug else \"INFO\") display(HTML(render_table(data, [\"\",", "it in the shadow tree. 
if os.access(basedir, os.W_OK): cachedir = basedir + \"/.radiopadre\"", "# insert output output = current_format.new_output(\"display_data\", data={ \"text/html\": [\"<b style='color: red'>Please select Cell|Run", "= \\ cell_width, window_width, window_height # def protect(author=None): # \"\"\"Makes current notebook protected", "js9 if not js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC # get buttons from various modules", "cleanup metadata metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub'] = 0 if 'signature' in metadata: metadata['signature']", "import DataDir, ls, lsR, lst, lsrt from .filelist import FileList from .fitsfile import", "= getattr(nbformat, 'v' + str(current_version)) # accommodate worksheets, if available if hasattr(nbdata, 'worksheets'):", "of warnings and errors from init errors = radiopadre_kernel.log_handler.get_records('WARNING') if errors: html +=", "author or os.environ['USER'] # display(Javascript(\"document.radiopadre.protect('%s')\" % author)) # display(HTML(render_status_message(\"\"\"This notebook is now protected,", "found\") return None get_ipython().magic(\"matplotlib inline\") settings.display.reset = _display_reset, settings_manager.DocString(\"call this to reset sizes", "well bomb out if cachedir is None: if not SHADOW_URL_PREFIX: raise RuntimeError(\"Trying to", "init try: import astropy astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed to import astropy\") # NONE", "NAMES ABOVE SHALL HAVE A TRALING SLASH!!! def _strip_slash(path): return path if path", "= _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR): # if in a subdirectory off the root,", "radiopadre-kernel.js already, but reloading is useful for debugging styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html", "this notebook. 
\"\"\" % (newpath, oldpath), )) # cleanup metadata metadata['radiopadre_notebook_protect'] = 0", "SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT SESSION_DIR SESSION_URL\"\"\".split(): data.append((varname, globals()[varname])) data", "+= fitsfile.add_general_buttons() # get list of warnings and errors from init errors =", "display(HTML(render_table(data, [\"\", \"\"], numbering=False))) def display_log(debug=False): from IPython.display import HTML data = radiopadre_kernel.log_handler.get_records(\"DEBUG\"", "RuntimeError(\"radiopadre.init() must be called first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR): # if", "location of the directory. The latter is the URL to this directory. \"\"\"", "A TRALING SLASH!!! def _strip_slash(path): return path if path == \"/\" or path", "\"%s\". # All other users will have to treat this notebook as read-only.\"\"\"", "CACHE_URL_ROOT, \\ SESSION_DIR, SESSION_URL, NBCONVERT # init settings settings = settings_manager.RadiopadreSettingsManager() try: __version__", "DIR NAMES ABOVE SHALL HAVE A TRALING SLASH!!! 
def _strip_slash(path): return path if", "return cachedir, cacheurl _init_js_side_done = None def _display_reset(): display(Javascript(\"document.radiopadre.reset_display_settings();\")) def _init_js_side(): \"\"\"Checks that", "+= \"\"\"<DIV onload=radiopadre.document.reset_display_settings></DIV>\"\"\" from radiopadre import layouts html += layouts.init_html from radiopadre_kernel import", "code = \"import radiopadre\\n\" + \\ \"%s = radiopadre.DirList('.')\" % copy_dirs if copy_root:", "menu to render this notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code, outputs=[output])) # insert markdown cells.insert(0,", "get list of warnings and errors from init errors = radiopadre_kernel.log_handler.get_records('WARNING') if errors:", "worksheets\") metadata = nbdata['metadata'] cells = nbdata['cells'] # strip out all cells up", "we weren't able to make a writeable one in the main tree --", "None else path.rstrip(\"/\") def _is_subdir(subdir, parent): return subdir == parent or subdir.startswith(parent+\"/\") from", "status data = [ (\"cwd\", os.getcwd()) ] for varname in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR", "select Cell|Run all from the menu to render this notebook.</b>\"] }) cells.insert(0, current_format.new_code_cell(code,", "radiopadre.DirList('.')\" % copy_dirs if copy_root: code += \"\\n%s = %s[0]\" % (copy_root, copy_dirs)", "setup status data = [ (\"cwd\", os.getcwd()) ] for varname in \"\"\"SESSION_ID ROOTDIR", "__version__ = \"development\" ## various notebook-related init try: import astropy astropy.log.setLevel('ERROR') except ImportError:", "strip out all cells up to and including indicated one del cells[:cell +", "import astropy astropy.log.setLevel('ERROR') except ImportError: radiopadre_kernel.log.warning(\"Failed to import astropy\") # NONE OF THE", "import js9 if not js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC # get buttons from various", "\"Cell|Run all\" from the menu to render this notebook. 
\"\"\" % (newpath, oldpath),", "all\" from the menu to render this notebook. \"\"\" % (newpath, oldpath), ))", "or subdir.startswith(parent+\"/\") from radiopadre_kernel import _make_symlink def display_status(): # setup status data =", "directory, but access to the shadow tree is not set up. This is", "= 0 if 'signature' in metadata: metadata['signature'] = \"\" # save nbformat.write(nbdata, open(newpath,", "import json import nbformat import os import pkg_resources import radiopadre_kernel from IPython.display import", "layouts.init_html from radiopadre_kernel import js9 if not js9.JS9_ERROR: html += js9.JS9_INIT_HTML_DYNAMIC # get", "basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR): # if in a subdirectory off the", "data.append((varname, globals()[varname])) data += [(\"\", \"startup log follows:\")] data += radiopadre_kernel.log_handler.get_records() from IPython.display", "\"/\" + subdir cachedir += \"/\" + subdir if not os.path.exists(cachedir): os.mkdir(cachedir) return", "reset sizes explicitly\") html = \"\"\"<script type='text/javascript'> document.radiopadre.register_user('{}'); document.radiopadre.reset_display_settings(); </script> \"\"\".format(os.environ['USER']) # reload", "metadata metadata['radiopadre_notebook_protect'] = 0 metadata['radiopadre_notebook_scrub'] = 0 if 'signature' in metadata: metadata['signature'] =", "tree # if this fails, we're stuck, so may as well bomb out", "cell output for c in cells: scrub_cell(c) # insert boilerplate code code =", "= False # print(\"importing radiopadre\") if not __init: from radiopadre_kernel import casacore_tables radiopadre_kernel.log.info(\"initializing", "must be called first\") basedir = _strip_slash(os.path.abspath(os.path.dirname(path))) if _is_subdir(basedir, ABSROOTDIR): # if in", "if not os.path.exists(cachedir): os.mkdir(cachedir) return cachedir, cacheurl _init_js_side_done = None def _display_reset(): 
display(Javascript(\"document.radiopadre.reset_display_settings();\"))", "return None get_ipython().magic(\"matplotlib inline\") settings.display.reset = _display_reset, settings_manager.DocString(\"call this to reset sizes explicitly\")", "make a writeable one in the main tree -- use shadow tree #", "display(HTML(render_status_message(\"\"\"This notebook is now protected, author is \"%s\". # All other users will", "make a symlink to it in the shadow tree. if os.access(basedir, os.W_OK): cachedir", "reloading is useful for debugging styles_file = os.path.join(os.path.dirname(__file__), \"html/radiopadre.css\") html += f\"\"\"<style> {open(styles_file).read()}", "ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \\ SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \\ FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT,", "this notebook as read-only.\"\"\" % author))) # # # def unprotect(): # \"\"\"Makes", "from .fitsfile import FITSFile from .imagefile import ImageFile from .casatable import CasaTable from", "caching radiopadre stuff associated with the given file. Returns tuple of (real_path, url_path).", "if cachedir remains None, we weren't able to make a writeable one in", "display(Javascript(\"document.radiopadre.unprotect()\")) # display(HTML(render_status_message(\"\"\"This notebook is now unprotected. 
# All users can treat it", "kernel, pull from it from radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME, \\ LOGFILE, ABSROOTDIR,", "side\") # print(\"initializing radiopadre\") _init_js_side() __init = True # import stuff from .file", "for c in cells: scrub_cell(c) # insert boilerplate code code = \"import radiopadre\\n\"", "can write to the basedir, make a .radiopadre dir within, and make a", "metadata['signature'] = \"\" # save nbformat.write(nbdata, open(newpath, 'w'), version) return newpath __init =", "return subdir == parent or subdir.startswith(parent+\"/\") from radiopadre_kernel import _make_symlink def display_status(): #", "varname in \"\"\"SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE", "{}\".format(shadowdir)) shadowdir += \"/.radiopadre\" _make_symlink(cachedir, shadowdir) cachedir = shadowdir else: cachedir = None", "to and including indicated one del cells[:cell + 1] # scrub cell output", "render_table(errors, [\"\", \"\"], numbering=False) display(HTML(html)) def hide_cell_code(hide=True): display(Javascript(f\"document.radiopadre.set_show_code({int(not hide)});\")) def set_window_sizes(cell_width, window_width, window_height):", "not set up. This is a bug.\") cachedir = shadowdir + \"/.radiopadre\" if", "(copy_root, copy_dirs) code += \"\\n%s.show()\" % copy_dirs # insert output output = current_format.new_output(\"display_data\",", "else: cachedir = None # if cachedir remains None, we weren't able to", "# this stuff is setup by the kernel, pull from it from radiopadre_kernel" ]
[ "-> int: wordList = set(wordList) if endWord not in wordList: return 0 q", "= q.popleft() if cur_node == endWord: return step for i in range(len(cur_node)): for", "for c in 'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i] + c + cur_node[i+1:] if next_node", "1 for _ in range(sz): cur_node = q.popleft() if cur_node == endWord: return", "set(wordList) if endWord not in wordList: return 0 q = deque([beginWord]) step =", "if cur_node == endWord: return step for i in range(len(cur_node)): for c in", "sz = len(q) step += 1 for _ in range(sz): cur_node = q.popleft()", "return step for i in range(len(cur_node)): for c in 'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i]", "deque([beginWord]) step = 0 wordList.discard(beginWord) while len(q) != 0: sz = len(q) step", "collections import Counter from collections import deque class Solution: def ladderLength(self, beginWord: str,", "from collections import deque class Solution: def ladderLength(self, beginWord: str, endWord: str, wordList:", "str, wordList: List[str]) -> int: wordList = set(wordList) if endWord not in wordList:", "wordList: List[str]) -> int: wordList = set(wordList) if endWord not in wordList: return", "step += 1 for _ in range(sz): cur_node = q.popleft() if cur_node ==", "range(len(cur_node)): for c in 'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i] + c + cur_node[i+1:] if", "len(q) step += 1 for _ in range(sz): cur_node = q.popleft() if cur_node", "wordList.discard(beginWord) while len(q) != 0: sz = len(q) step += 1 for _", "0 wordList.discard(beginWord) while len(q) != 0: sz = len(q) step += 1 for", "not in wordList: return 0 q = deque([beginWord]) step = 0 wordList.discard(beginWord) while", "q.popleft() if cur_node == endWord: return step for i in range(len(cur_node)): for c", "Counter from collections import deque class Solution: def ladderLength(self, beginWord: str, endWord: str,", "Solution: def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int: wordList 
=", "+= 1 for _ in range(sz): cur_node = q.popleft() if cur_node == endWord:", "from collections import Counter from collections import deque class Solution: def ladderLength(self, beginWord:", "= cur_node[:i] + c + cur_node[i+1:] if next_node in wordList: wordList.remove(next_node) q.append(next_node) return", "for i in range(len(cur_node)): for c in 'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i] + c", "c in 'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i] + c + cur_node[i+1:] if next_node in", "endWord not in wordList: return 0 q = deque([beginWord]) step = 0 wordList.discard(beginWord)", "endWord: str, wordList: List[str]) -> int: wordList = set(wordList) if endWord not in", "class Solution: def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int: wordList", "next_node = cur_node[:i] + c + cur_node[i+1:] if next_node in wordList: wordList.remove(next_node) q.append(next_node)", "step for i in range(len(cur_node)): for c in 'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i] +", "List[str]) -> int: wordList = set(wordList) if endWord not in wordList: return 0", "while len(q) != 0: sz = len(q) step += 1 for _ in", "cur_node[:i] + c + cur_node[i+1:] if next_node in wordList: wordList.remove(next_node) q.append(next_node) return 0", "import deque class Solution: def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) ->", "== endWord: return step for i in range(len(cur_node)): for c in 'abcdefghijklmnopqrstuvwxyz': next_node", "str, endWord: str, wordList: List[str]) -> int: wordList = set(wordList) if endWord not", "i in range(len(cur_node)): for c in 'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i] + c +", "wordList = set(wordList) if endWord not in wordList: return 0 q = deque([beginWord])", "import Counter from collections import deque class Solution: def ladderLength(self, beginWord: str, endWord:", "collections import deque class Solution: def ladderLength(self, beginWord: str, endWord: str, 
wordList: List[str])", "wordList: return 0 q = deque([beginWord]) step = 0 wordList.discard(beginWord) while len(q) !=", "return 0 q = deque([beginWord]) step = 0 wordList.discard(beginWord) while len(q) != 0:", "= 0 wordList.discard(beginWord) while len(q) != 0: sz = len(q) step += 1", "len(q) != 0: sz = len(q) step += 1 for _ in range(sz):", "= deque([beginWord]) step = 0 wordList.discard(beginWord) while len(q) != 0: sz = len(q)", "!= 0: sz = len(q) step += 1 for _ in range(sz): cur_node", "in range(sz): cur_node = q.popleft() if cur_node == endWord: return step for i", "in 'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i] + c + cur_node[i+1:] if next_node in wordList:", "step = 0 wordList.discard(beginWord) while len(q) != 0: sz = len(q) step +=", "= len(q) step += 1 for _ in range(sz): cur_node = q.popleft() if", "range(sz): cur_node = q.popleft() if cur_node == endWord: return step for i in", "beginWord: str, endWord: str, wordList: List[str]) -> int: wordList = set(wordList) if endWord", "cur_node == endWord: return step for i in range(len(cur_node)): for c in 'abcdefghijklmnopqrstuvwxyz':", "for _ in range(sz): cur_node = q.popleft() if cur_node == endWord: return step", "= set(wordList) if endWord not in wordList: return 0 q = deque([beginWord]) step", "if endWord not in wordList: return 0 q = deque([beginWord]) step = 0", "cur_node = q.popleft() if cur_node == endWord: return step for i in range(len(cur_node)):", "endWord: return step for i in range(len(cur_node)): for c in 'abcdefghijklmnopqrstuvwxyz': next_node =", "in range(len(cur_node)): for c in 'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i] + c + cur_node[i+1:]", "def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int: wordList = set(wordList)", "q = deque([beginWord]) step = 0 wordList.discard(beginWord) while len(q) != 0: sz =", "int: wordList = set(wordList) if endWord not in wordList: return 0 q =", "in wordList: return 0 q = deque([beginWord]) step = 0 
wordList.discard(beginWord) while len(q)", "0 q = deque([beginWord]) step = 0 wordList.discard(beginWord) while len(q) != 0: sz", "0: sz = len(q) step += 1 for _ in range(sz): cur_node =", "_ in range(sz): cur_node = q.popleft() if cur_node == endWord: return step for", "'abcdefghijklmnopqrstuvwxyz': next_node = cur_node[:i] + c + cur_node[i+1:] if next_node in wordList: wordList.remove(next_node)", "deque class Solution: def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:", "ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int: wordList = set(wordList) if" ]
[ "from screen import oled from config import CRIPTOS import criptos import time while", "while True: oled.fill(0) oled.text(\"Consultando\", 1, 1, 1) oled.show() for moeda in CRIPTOS: cotacao", "time while True: oled.fill(0) oled.text(\"Consultando\", 1, 1, 1) oled.show() for moeda in CRIPTOS:", "import time while True: oled.fill(0) oled.text(\"Consultando\", 1, 1, 1) oled.show() for moeda in", "CRIPTOS import criptos import time while True: oled.fill(0) oled.text(\"Consultando\", 1, 1, 1) oled.show()", "screen import oled from config import CRIPTOS import criptos import time while True:", "{}\".format(cotacao['moeda']) , 1, 1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1, 10, 1) oled.text(\"Venda: {}\".format(cotacao['venda']), 1,", "1) oled.show() for moeda in CRIPTOS: cotacao = criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) ,", "1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1, 10, 1) oled.text(\"Venda: {}\".format(cotacao['venda']), 1, 19, 1) oled.show()", "oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1, 1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1, 10, 1) oled.text(\"Venda: {}\".format(cotacao['venda']),", "criptos import time while True: oled.fill(0) oled.text(\"Consultando\", 1, 1, 1) oled.show() for moeda", "1, 1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1, 10, 1) oled.text(\"Venda: {}\".format(cotacao['venda']), 1, 19, 1)", "cotacao = criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1, 1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1,", "oled.fill(0) oled.text(\"Consultando\", 1, 1, 1) oled.show() for moeda in CRIPTOS: cotacao = criptos.do_criptos(moeda)", "= criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1, 1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1, 10,", "oled from config import CRIPTOS import criptos import time while True: oled.fill(0) 
oled.text(\"Consultando\",", "oled.show() for moeda in CRIPTOS: cotacao = criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1,", "import criptos import time while True: oled.fill(0) oled.text(\"Consultando\", 1, 1, 1) oled.show() for", "1, 1, 1) oled.show() for moeda in CRIPTOS: cotacao = criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda:", "from config import CRIPTOS import criptos import time while True: oled.fill(0) oled.text(\"Consultando\", 1,", "for moeda in CRIPTOS: cotacao = criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1, 1,", "import CRIPTOS import criptos import time while True: oled.fill(0) oled.text(\"Consultando\", 1, 1, 1)", "import oled from config import CRIPTOS import criptos import time while True: oled.fill(0)", "True: oled.fill(0) oled.text(\"Consultando\", 1, 1, 1) oled.show() for moeda in CRIPTOS: cotacao =", "<filename>main.py from screen import oled from config import CRIPTOS import criptos import time", "moeda in CRIPTOS: cotacao = criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1, 1, 1)", "criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1, 1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1, 10, 1)", "oled.text(\"Consultando\", 1, 1, 1) oled.show() for moeda in CRIPTOS: cotacao = criptos.do_criptos(moeda) oled.fill(0)", "oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1, 1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1, 10, 1) oled.text(\"Venda:", "config import CRIPTOS import criptos import time while True: oled.fill(0) oled.text(\"Consultando\", 1, 1,", "CRIPTOS: cotacao = criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1, 1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']),", "1, 1) oled.show() for moeda in CRIPTOS: cotacao = criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: 
{}\".format(cotacao['moeda'])", "in CRIPTOS: cotacao = criptos.do_criptos(moeda) oled.fill(0) oled.text(\"Moeda: {}\".format(cotacao['moeda']) , 1, 1, 1) oled.text(\"Compra:", ", 1, 1, 1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1, 10, 1) oled.text(\"Venda: {}\".format(cotacao['venda']), 1, 19,", "1) oled.text(\"Compra: {}\".format(cotacao['compra']), 1, 10, 1) oled.text(\"Venda: {}\".format(cotacao['venda']), 1, 19, 1) oled.show() time.sleep(5)" ]
[ "we take just the first one, a lil bit hardcoded i know if", "monitoring self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()} self.best_epoch = None self.best_score = None", "import Enum from posix import listdir from typing import TYPE_CHECKING, Any, Dict, Iterable", "y_true) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return loss, (images.cpu(),", "self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) # debug if active if self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss,", "scores.items(): if metric_name in exclude: continue if score.ndim > 0: # store for", "self._store_samples(images, y_pred, y_true) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return", "metrics # we group only the 'standard' images, not the rotated ones y_true", "self.scheduler = scheduler self.logger = logger or EmptyLogger() # setup metrics, if any", "def _update_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor, stage: TrainerStage = TrainerStage.train) -> None: with", "if present. 
Requires a plot callback # better to unpack now, so that", "kdd_criterion: nn.Module, kde_criterion: nn.Module = None, kdd_lambda: float = 0.0, kde_lambda: float =", "old_out) # sum up losses total = seg_loss + self.kdd_lambda * kdd_loss #", "v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times) for callback in self.callbacks: callback(self) except KeyboardInterrupt:", "for callback in self.callbacks: callback(self) except KeyboardInterrupt: LOG.info(\"[Epoch %2d] Interrupting training\", curr_epoch) break", "for AMP self.optimizer = optimizer self.scheduler = scheduler self.logger = logger or EmptyLogger()", "self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) # store results for name, val in", "loss on main task, using AMP with self.accelerator.autocast(): new_out, new_features = self.model(x) seg_loss", "full string once completed LOG.info(\", \".join(log_strings)) # then log class-wise results in a", "[] # prepare model and loader, pass as val loader to store num", "internal state self.rank = get_rank() self.is_main = self.rank == 0 self.current_epoch = -1", "* kdd_loss # gather and update metrics # we group only the 'standard'", "iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times) if val_dataloader is not None: self.validation_epoch_start() v_losses, v_times =", "self.kdd_lambda = kdd_lambda self.kde_lambda = kde_lambda self.multimodal = isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd = MultiModalScaling()", "with the batch size later # also, we take just the first one,", "and update metrics # we group only the 'standard' images, not the rotated", "name, values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) 
self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader: DataLoader,", "self.sample_batches is not None and self.sample_batches > 0: self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False)", "self.criterion(preds, y) # gather info images = self.accelerator.gather(x) y_true = self.accelerator.gather(y) y_pred =", "list() def _prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None) -> None: self.model, self.optimizer", "batch_index=i) loss = data[\"tot_loss\"] elapsed = (time.time() - start) # gather info loss_val", "train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch:", "old_classes: Dict[int, str], new_classes: Dict[int, str], seg_criterion: nn.Module, kdd_criterion: nn.Module, kde_criterion: nn.Module =", "losses and retrieve x, y x, y = batch # forward and loss", "batch_index: int): x, y = batch x = x.to(self.accelerator.device) y = y.to(self.accelerator.device) #", "dtype=seg_loss.dtype) if self.task.step > 0: old_out, _ = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out)", "continue if score.ndim > 0: # store for later classwise[metric_name] = score continue", "for name, values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader:", "samples with a list of indices with same length if self.sample_batches is not", "the logger self.step() return losses, timings def train_epoch_end(self, train_losses: dict, train_times: list): with", "LOG.debug(\"Classwise: %s\", str(classwise)) header = list(self.all_classes.values()) 
self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def _debug_training(self, **kwargs: dict)", "start) # store training info self.current_loss = loss.mean() loss_val = loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"})", "-1 self.current_loss = None self.global_step = -1 # internal monitoring self.current_scores = {TrainerStage.train.value:", "task with self.accelerator.autocast(): new_out, _ = self.model(x) seg_loss = self.criterion(new_out, y) # this", "(images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def predict(self, test_dataloader: DataLoader, metrics: Dict[str, Metric], logger_exclude: Iterable[str]", "callback in self.callbacks: callback.dispose(self) def add_metrics(self, stage: TrainerStage, metrics: Dict[str, Metric]) -> Trainer:", "if any self.metrics = dict() if train_metrics is not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if", "images[i].detach().cpu() true_mask = targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask)) def add_callback(self, callback:", "Optimizer from torch.utils.data import DataLoader from saticl.logging import BaseLogger from saticl.logging.empty import EmptyLogger", "-> Any: val_tqdm = progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main) timings = [] losses", "0: # store for later classwise[metric_name] = score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\")", "old_features = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) total = seg_loss + self.kdd_lambda *", "time.time() data = self.validation_batch(batch=batch, batch_index=i) loss = data[\"tot_loss\"] elapsed = (time.time() - start)", "TrainerStage = TrainerStage.train) -> None: result = dict() with torch.no_grad(): for name, metric", "np.mean(timings)) 
self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration on callbacks for the test set (e.g.", "self.model.eval() for i, batch in enumerate(val_tqdm): start = time.time() data = self.validation_batch(batch=batch, batch_index=i)", "# first log scalars for metric_name, score in scores.items(): if metric_name in exclude:", "we group only the 'standard' images, not the rotated ones y_true = self.accelerator.gather(y)", "self.criterion_kdd(new_out, old_out) total = seg_loss + self.kdd_lambda * kdd_loss y_true = self.accelerator.gather(y) y_pred", "metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu())", "step 1 onwards kdd_loss = torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if self.task.step > 0: old_out,", "# also, we take just the first one, a lil bit hardcoded i", "self.multimodal = isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd = MultiModalScaling() # optimizer, scheduler and logger, scaler", "= (time.time() - start) # gather info loss_val = loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) #", "because of the prepare # we swap an integer of num samples with", "size later # also, we take just the first one, a lil bit", "AMP self.optimizer = optimizer self.scheduler = scheduler self.logger = logger or EmptyLogger() #", "y_true[:1]) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\": total,", "import numpy as np import torch from accelerate import Accelerator from torch import", "for loggers self.task = task self.old_classes = old_classes self.new_classes = new_classes self.all_classes =", "x, y x, y = batch seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0) # forward", "DataLoader, metrics: Dict[str, Metric], logger_exclude: 
Iterable[str] = None, return_preds: bool = False): logger_exclude", "logger, scaler for AMP self.optimizer = optimizer self.scheduler = scheduler self.logger = logger", "is not None or stage == \"test\", \"ICL steps require the old model", "self.optimizer = optimizer self.scheduler = scheduler self.logger = logger or EmptyLogger() # setup", "for later classwise[metric_name] = score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log the", "know if self.sample_batches is not None and batch_index in self.sample_batches: images = self.accelerator.gather(x)", "AMP with self.accelerator.autocast(): preds, _ = self.model(x) loss = self.criterion(preds, y) # gather", "val_metrics is not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL information for loggers self.task =", "as for validation losses.append(loss_value) timings.append(elapsed) if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test)", "for validation losses.append(loss_value) timings.append(elapsed) if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test,", "self.validation_epoch_start() v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times) for callback in self.callbacks: callback(self)", "+ list(new_classes.items())) # internal state self.rank = get_rank() self.is_main = self.rank == 0", "= None, return_preds: bool = False): logger_exclude = logger_exclude or [] self.metrics[TrainerStage.test.value] =", "data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return 
losses, timings def validation_epoch_end(self, val_losses: list, val_times: list): with", "train_losses: dict, train_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name, values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\",", "train_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name, values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\",", "-> torch.Tensor: # init losses and retrieve x, y x, y = batch", "# store results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return losses, timings", "\"val\" test = \"test\" class Trainer: def __init__(self, accelerator: Accelerator, task: Task, new_model:", "= None, val_metrics: Dict[str, Metric] = None, logger: BaseLogger = None, samples: int", "self.current_epoch = curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) #", "0.0, kde_lambda: float = 0.0, train_metrics: Dict[str, Metric] = None, val_metrics: Dict[str, Metric]", "self.metrics[stage.value].values(): metric(y_true, y_pred) def _compute_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: result =", "self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log the full string once completed LOG.info(\", \".join(log_strings))", "but it's best to call it every epoch instead of iteration self.scheduler.step() self.train_epoch_end(t_losses,", "= loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) # store results", "plot 
callback # better to unpack now, so that we don't have to", "score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log the full string once completed LOG.info(\", \".join(log_strings)) #", "Any, batch_index: int): x, y = batch x = x.to(self.accelerator.device) y = y.to(self.accelerator.device)", "+= 1 self.logger.step() def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch: Any) -> torch.Tensor: #", "# we group only the 'standard' images, not the rotated ones y_true =", "debug if active if self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\": total,", "= get_rank() self.is_main = self.rank == 0 self.current_epoch = -1 self.current_loss = None", "test set (e.g. display images) for callback in self.callbacks: callback(self) return losses, results", "import Task from saticl.utils.common import get_logger, progressbar from saticl.utils.decorators import get_rank if TYPE_CHECKING:", "has effect from step 1 onwards kdd_loss = torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if self.task.step", "targets: torch.Tensor) -> None: for i in range(images.size(0)): image = images[i].detach().cpu() true_mask =", "in self.callbacks: callback(self) except KeyboardInterrupt: LOG.info(\"[Epoch %2d] Interrupting training\", curr_epoch) break self.dispose_callbacks() return", "= progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main) self.model.train() for batch in train_tqdm: start =", "bit hardcoded i know if self.sample_batches is not None and batch_index in self.sample_batches:", "measure elapsed time elapsed = (time.time() - start) # store training info self.current_loss", "torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor) -> None: for i in range(images.size(0)): image =", "= None, max_epochs: int = 100): train_dataloader, val_dataloader = 
self._prepare(train_dataloader, val_dataloader) self.best_state_dict =", "distillation: KDD = KD on decoder, KDE = KD on encoder self.criterion_kdd =", "loss on main task, using AMP with self.accelerator.autocast(): preds, _ = self.model(x) loss", "= self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) # debug if active if self.debug: self._debug_training(x=x.dtype, y=y.dtype,", "-> Any: timings = [] losses = defaultdict(list) train_tqdm = progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value,", "= old_classes self.new_classes = new_classes self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items())) # internal state", "= seg_loss + self.kdd_lambda * kdd_loss y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) #", "int, train_dataloader: DataLoader) -> Any: timings = [] losses = defaultdict(list) train_tqdm =", "= self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times) for callback in self.callbacks: callback(self) except KeyboardInterrupt: LOG.info(\"[Epoch", "def add_metrics(self, stage: TrainerStage, metrics: Dict[str, Metric]) -> Trainer: assert stage.value not in", "nn.Module, kde_criterion: nn.Module = None, kdd_lambda: float = 0.0, kde_lambda: float = 0.0,", "results = [], [], [] # prepare model and loader, pass as val", "metrics\" self.metrics[stage.value] = metrics def step(self) -> None: self.global_step += 1 self.logger.step() def", "to unpack now, so that we don't have to deal with the batch", "dict, train_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name, values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values))", "not log 'iter' versions, as for validation losses.append(loss_value) timings.append(elapsed) if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\",", 
"on segmentation task with self.accelerator.autocast(): new_out, _ = self.model(x) seg_loss = self.criterion(new_out, y)", "self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration on callbacks for the", "= self.criterion_kdd(new_out, old_out) # sum up losses total = seg_loss + self.kdd_lambda *", "call it, but it's best to call it every epoch instead of iteration", "present. Requires a plot callback # better to unpack now, so that we", "= False): logger_exclude = logger_exclude or [] self.metrics[TrainerStage.test.value] = metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm =", "loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) # store results for name, val in data.items():", "on callbacks for the test set (e.g. 
display images) for callback in self.callbacks:", "Dict[int, str], new_classes: Dict[int, str], seg_criterion: nn.Module, kdd_criterion: nn.Module, kde_criterion: nn.Module = None,", "if self.sample_batches is not None and batch_index in self.sample_batches: images = self.accelerator.gather(x) self._store_samples(images[:1],", "None: for callback in self.callbacks: callback.dispose(self) def add_metrics(self, stage: TrainerStage, metrics: Dict[str, Metric])", "images = self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1]) # update metrics and return losses self._update_metrics(y_true=y_true,", "Metric]) -> Trainer: assert stage.value not in self.metrics, \"stage already present in metrics\"", "torch.Tensor) -> None: for i in range(images.size(0)): image = images[i].detach().cpu() true_mask = targets[i].detach().cpu()", "self def test_batch(self, batch: Any, batch_index: int): x, y = batch x =", "= defaultdict(list) train_tqdm = progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main) self.model.train() for batch in", "self.accelerator = accelerator self.debug = debug self.model = new_model self.old_model = old_model self.criterion", "y_pred[:1], y_true[:1]) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\":", "and retrieve x, y x, y = batch seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0)", "\"train\" val = \"val\" test = \"test\" class Trainer: def __init__(self, accelerator: Accelerator,", "BaseLogger = None, samples: int = None, stage: str = \"train\", debug: bool", "don't have to deal with the batch size later # also, we take", "outputs: torch.Tensor, targets: torch.Tensor) -> None: for i in range(images.size(0)): image = images[i].detach().cpu()", "y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(preds) # store samples for visualization, if present.", "useless) # store results for 
name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return losses,", "a lil bit hardcoded i know if self.sample_batches is not None and batch_index", "None: val_dataloader = self.accelerator.prepare(val_dataloader) # we need to do this here, because of", "effect from step 1 onwards kdd_loss = torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if self.task.step >", "defaultdict(list) with torch.no_grad(): self.model.eval() for i, batch in enumerate(val_tqdm): start = time.time() data", "= torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if self.task.step > 0: old_out, _ = self.old_model(x) kdd_loss", "None self.sample_batches = samples self.sample_content = list() self.callbacks: listdir[BaseCallback] = list() def _prepare(self,", "with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name, values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train)", "= kdd_criterion self.criterion_kde = kde_criterion self.kdd_lambda = kdd_lambda self.kde_lambda = kde_lambda self.multimodal =", "import annotations import time from collections import OrderedDict, defaultdict from enum import Enum", "self.sample_content = list() self.callbacks: listdir[BaseCallback] = list() def _prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader", "self.rank = get_rank() self.is_main = self.rank == 0 self.current_epoch = -1 self.current_loss =", "MultiModalScaling from saticl.metrics import Metric from saticl.models.encoders import MultiEncoder from saticl.tasks import Task", "and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def", "return self def setup_callbacks(self) -> None: for callback in self.callbacks: callback.setup(self) def 
dispose_callbacks(self)", "with same length if self.sample_batches is not None and self.sample_batches > 0: self.sample_batches", "TrainerStage, metrics: Dict[str, Metric]) -> Trainer: assert stage.value not in self.metrics, \"stage already", "batch_index=i) elapsed = (time.time() - start) loss_value = loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we", "of iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times) if val_dataloader is not None: self.validation_epoch_start() v_losses, v_times", "range(images.size(0)): image = images[i].detach().cpu() true_mask = targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask))", "# init losses and retrieve x, y x, y = batch # forward", "self.metrics[TrainerStage.test.value] = metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main) losses, timings, results", "train_metrics is not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics is not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics)", "= self.criterion_kdd(new_out, old_out) total = seg_loss + self.kdd_lambda * kdd_loss y_true = self.accelerator.gather(y)", "val_dataloader def _update_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor, stage: TrainerStage = TrainerStage.train) -> None:", "swap an integer of num samples with a list of indices with same", "val_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name, values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\",", "range(max_epochs): self.current_epoch = curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader)", "= 
images[i].detach().cpu() true_mask = targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask)) def add_callback(self,", "if self.sample_batches is not None and batch_index in self.sample_batches: self._store_samples(images, y_pred, y_true) #", "= TrainerStage.train) -> None: result = dict() with torch.no_grad(): for name, metric in", "Enum): train = \"train\" val = \"val\" test = \"test\" class Trainer: def", "# setup metrics, if any self.metrics = dict() if train_metrics is not None:", "not log 'iter' versions for loss and timings, since we do not advance", "self.logger.log_scalar(\"train/time_iter\", elapsed) # store results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) #", "timings, results = [], [], [] # prepare model and loader, pass as", "torch.utils.data import DataLoader from saticl.logging import BaseLogger from saticl.logging.empty import EmptyLogger from saticl.losses.regularization", "= None self.best_score = None self.best_state_dict = None self.sample_batches = samples self.sample_content =", "= new_model self.old_model = old_model self.criterion = seg_criterion # knowledge distillation: KDD =", "= self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not the best place to call it, but it's", "self.accelerator.gather(new_out) # store samples for visualization, if present. 
Requires a plot callback #", "store results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return losses, timings def", "else: self.sample_batches = np.array([]) return train_dataloader, val_dataloader def _update_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor,", "with torch.no_grad(): self.model.eval() for i, batch in enumerate(test_tqdm): start = time.time() loss, data", "pred_mask = outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask)) def add_callback(self, callback: BaseCallback) -> Trainer: self.callbacks.append(callback)", "log_strings = [] exclude = exclude or [] scores = self.current_scores[stage.value] classwise =", "y) # this only has effect from step 1 onwards kdd_loss = torch.tensor(0,", "onwards kdd_loss = torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if self.task.step > 0: old_out, _ =", "= dict() if train_metrics is not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics is not", "state self.rank = get_rank() self.is_main = self.rank == 0 self.current_epoch = -1 self.current_loss", "np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None, max_epochs:", "callback.dispose(self) def add_metrics(self, stage: TrainerStage, metrics: Dict[str, Metric]) -> Trainer: assert stage.value not", "Task from saticl.utils.common import get_logger, progressbar from saticl.utils.decorators import get_rank if TYPE_CHECKING: from", "dict() # first log scalars for metric_name, score in scores.items(): if metric_name in", "total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def validation_epoch(self, epoch: int, val_dataloader: DataLoader) -> Any:", "best to call it every epoch instead of iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times) if", "# measure elapsed time elapsed = 
(time.time() - start) # store training info", "self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()} self.best_epoch = None self.best_score = None self.best_state_dict", "def step(self) -> None: self.global_step += 1 self.logger.step() def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def train_batch(self,", "in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader", "data = self.test_batch(batch=batch, batch_index=i) elapsed = (time.time() - start) loss_value = loss.item() test_tqdm.set_postfix({\"loss\":", "%2d] Interrupting training\", curr_epoch) break self.dispose_callbacks() return self def test_batch(self, batch: Any, batch_index:", "better to unpack now, so that we don't have to deal with the", "try: self.train_epoch_start() t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not the best place to", "samples: int = None, stage: str = \"train\", debug: bool = False) ->", "metric.reset() def _log_metrics(self, stage: TrainerStage = TrainerStage.train, exclude: Iterable[str] = None) -> None:", "self._store_samples(images[:1], y_pred[:1], y_true[:1]) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return", "list, val_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name, values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values))", "def dispose_callbacks(self) -> None: for callback in self.callbacks: callback.dispose(self) def add_metrics(self, stage: TrainerStage,", "= outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask)) def add_callback(self, callback: BaseCallback) -> Trainer: self.callbacks.append(callback) 
return", "# update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return loss, (images.cpu(), y_true.cpu(),", "validation losses.append(loss_value) timings.append(elapsed) if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude)", "if val_dataloader is not None: val_dataloader = self.accelerator.prepare(val_dataloader) # we need to do", "we don't have to deal with the batch size later if self.sample_batches is", "self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader) if val_dataloader is not None:", "completed LOG.info(\", \".join(log_strings)) # then log class-wise results in a single table if", "= debug self.model = new_model self.old_model = old_model self.criterion = seg_criterion # knowledge", "None) -> None: self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader) if val_dataloader", "KD\" self.accelerator = accelerator self.debug = debug self.model = new_model self.old_model = old_model", "DataLoader from saticl.logging import BaseLogger from saticl.logging.empty import EmptyLogger from saticl.losses.regularization import MultiModalScaling", "LOG.info(\"[Epoch %2d] Interrupting training\", curr_epoch) break self.dispose_callbacks() return self def test_batch(self, batch: Any,", "= MultiModalScaling() # optimizer, scheduler and logger, scaler for AMP self.optimizer = optimizer", "metric in self.metrics[stage.value].values(): metric.reset() def _log_metrics(self, stage: TrainerStage = TrainerStage.train, exclude: Iterable[str] =", "this only has effect from step 1 onwards kdd_loss = torch.tensor(0, device=seg_loss.device, 
dtype=seg_loss.dtype)", "def test_batch(self, batch: Any, batch_index: int): x, y = batch x = x.to(self.accelerator.device)", "forward and loss on main task, using AMP with self.accelerator.autocast(): preds, _ =", "disable=not self.is_main) timings = [] losses = defaultdict(list) with torch.no_grad(): self.model.eval() for i,", "x, y = batch x = x.to(self.accelerator.device) y = y.to(self.accelerator.device) # forward and", "= \"test\" class Trainer: def __init__(self, accelerator: Accelerator, task: Task, new_model: nn.Module, old_model:", "y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def train_epoch(self,", "y) # gather info images = self.accelerator.gather(x) y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(preds)", "on main task, using AMP with self.accelerator.autocast(): new_out, new_features = self.model(x) seg_loss =", "torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name, values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def", "0 for curr_epoch in range(max_epochs): self.current_epoch = curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses,", "get_logger(__name__) class TrainerStage(str, Enum): train = \"train\" val = \"val\" test = \"test\"", "with the batch size later if self.sample_batches is not None and batch_index in", "None, stage: str = \"train\", debug: bool = False) -> None: assert task.step", "val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader =", "batch in enumerate(test_tqdm): start = time.time() loss, data = 
self.test_batch(batch=batch, batch_index=i) elapsed =", "val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval() for i, batch in enumerate(test_tqdm): start = time.time() loss,", "np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else: self.sample_batches = np.array([]) return train_dataloader, val_dataloader def _update_metrics(self, y_true:", "dispose_callbacks(self) -> None: for callback in self.callbacks: callback.dispose(self) def add_metrics(self, stage: TrainerStage, metrics:", "and batch_index in self.sample_batches: self._store_samples(images, y_pred, y_true) # update metrics and return losses", "val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) # step the logger self.step() return losses, timings", "kdd_loss = self.criterion_kdd(new_out, old_out) total = seg_loss + self.kdd_lambda * kdd_loss y_true =", "# ICL information for loggers self.task = task self.old_classes = old_classes self.new_classes =", "None and self.sample_batches > 0: self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else: self.sample_batches =", "self.best_score = None self.best_state_dict = None self.sample_batches = samples self.sample_content = list() self.callbacks:", "stage=TrainerStage.val) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def validation_epoch(self, epoch: int, val_dataloader:", "val_losses: list, val_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name, values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\",", "the old model for KD\" self.accelerator = accelerator self.debug = debug self.model =", "name, metric in self.metrics[stage.value].items(): result[name] = metric.compute() self.current_scores[stage.value] = result def _reset_metrics(self, stage:", "it every epoch instead of iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times) if val_dataloader 
is not", "epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main) self.model.train() for batch in train_tqdm: start = time.time() self.optimizer.zero_grad()", "stage == \"test\", \"ICL steps require the old model for KD\" self.accelerator =", "loss and timings, since we do not advance the logger step # during", "kdd_loss = self.criterion_kdd(new_out, old_out) # sum up losses total = seg_loss + self.kdd_lambda", "None: self.validation_epoch_start() v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times) for callback in self.callbacks:", "self.model.eval() for i, batch in enumerate(test_tqdm): start = time.time() loss, data = self.test_batch(batch=batch,", "if TYPE_CHECKING: from saticl.trainer.callbacks import BaseCallback LOG = get_logger(__name__) class TrainerStage(str, Enum): train", "int): x, y = batch x = x.to(self.accelerator.device) y = y.to(self.accelerator.device) # forward", "val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we do not log 'iter' versions for loss and timings,", "exclude: Iterable[str] = None) -> None: log_strings = [] exclude = exclude or", "self.criterion_kdd = kdd_criterion self.criterion_kde = kde_criterion self.kdd_lambda = kdd_lambda self.kde_lambda = kde_lambda self.multimodal", "later # also, we take just the first one, a lil bit hardcoded", "numpy as np import torch from accelerate import Accelerator from torch import nn", "true_mask, pred_mask)) def add_callback(self, callback: BaseCallback) -> Trainer: self.callbacks.append(callback) return self def setup_callbacks(self)", "first one, a lil bit hardcoded i know if self.sample_batches is not None", "np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration on callbacks for the test", "TYPE_CHECKING, Any, Dict, Iterable import numpy as 
np import torch from accelerate import", "dict()} self.best_epoch = None self.best_score = None self.best_state_dict = None self.sample_batches = samples", "store training info self.current_loss = loss.mean() loss_val = loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val)", "self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch: Any, batch_index: int): # init", "= data[\"tot_loss\"] elapsed = (time.time() - start) # gather info loss_val = loss.mean().item()", "self.train_epoch_start() t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not the best place to call", "length if self.sample_batches is not None and self.sample_batches > 0: self.sample_batches = np.random.choice(len(val_dataloader),", "for metric in self.metrics[stage.value].values(): metric(y_true, y_pred) def _compute_metrics(self, stage: TrainerStage = TrainerStage.train) ->", "epoch instead of iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times) if val_dataloader is not None: self.validation_epoch_start()", "train = \"train\" val = \"val\" test = \"test\" class Trainer: def __init__(self,", "timings def train_epoch_end(self, train_losses: dict, train_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name, values", "self.is_main) self.model.train() for batch in train_tqdm: start = time.time() self.optimizer.zero_grad() data = self.train_batch(batch=batch)", "= self.criterion(preds, y) # gather info images = self.accelerator.gather(x) y_true = self.accelerator.gather(y) y_pred", "Accelerator, task: Task, new_model: nn.Module, old_model: nn.Module, optimizer: Optimizer, scheduler: Any, old_classes: Dict[int,", "losses[name].append(val.mean().item()) timings.append(elapsed) # step the logger 
self.step() return losses, timings def train_epoch_end(self, train_losses:", "gather info loss_val = loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we do not log 'iter'", "outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask)) def add_callback(self, callback: BaseCallback) -> Trainer: self.callbacks.append(callback) return self", "from saticl.logging.empty import EmptyLogger from saticl.losses.regularization import MultiModalScaling from saticl.metrics import Metric from", "train_tqdm = progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main) self.model.train() for batch in train_tqdm: start", "for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return losses, timings def validation_epoch_end(self, val_losses:", "return losses, timings def validation_epoch_end(self, val_losses: list, val_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for", "KDD = KD on decoder, KDE = KD on encoder self.criterion_kdd = kdd_criterion", "from enum import Enum from posix import listdir from typing import TYPE_CHECKING, Any,", "self.sample_batches > 0: self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else: self.sample_batches = np.array([]) return", "None self.best_score = None self.best_state_dict = None self.sample_batches = samples self.sample_content = list()", "torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name, values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def", "self.rank == 0 self.current_epoch = -1 self.current_loss = None self.global_step = -1 #", "the best place to call it, but it's best to call it every", "self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) 
self.validation_epoch_end(v_losses, v_times) for callback in self.callbacks: callback(self) except KeyboardInterrupt: LOG.info(\"[Epoch %2d]", "self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL information for loggers self.task = task self.old_classes = old_classes", "start) loss_value = loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we do not log 'iter' versions,", "= self.validation_batch(batch=batch, batch_index=i) loss = data[\"tot_loss\"] elapsed = (time.time() - start) # gather", "Optimizer, scheduler: Any, old_classes: Dict[int, str], new_classes: Dict[int, str], seg_criterion: nn.Module, kdd_criterion: nn.Module,", "= [] losses = defaultdict(list) train_tqdm = progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main) self.model.train()", "t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not the best place to call it, but", "start = time.time() self.optimizer.zero_grad() data = self.train_batch(batch=batch) loss = data[\"tot_loss\"] # backward pass", "if self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\":", "= progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main) losses, timings, results = [], [], [] #", "to call it, but it's best to call it every epoch instead of", "self.best_epoch = None self.best_score = None self.best_state_dict = None self.sample_batches = samples self.sample_content", "this here, because of the prepare # we swap an integer of num", "we do not log 'iter' versions for loss and timings, since we do", "y_pred: torch.Tensor, stage: TrainerStage = TrainerStage.train) -> None: with torch.no_grad(): for metric in", "advance the logger step # during validation (also, it's kind of useless) #", "loss, data = self.test_batch(batch=batch, batch_index=i) elapsed = 
(time.time() - start) loss_value = loss.item()", "start = time.time() data = self.validation_batch(batch=batch, batch_index=i) loss = data[\"tot_loss\"] elapsed = (time.time()", "\"ICL steps require the old model for KD\" self.accelerator = accelerator self.debug =", "self.metrics[stage.value].items(): result[name] = metric.compute() self.current_scores[stage.value] = result def _reset_metrics(self, stage: TrainerStage = TrainerStage.train)", "test_batch(self, batch: Any, batch_index: int): x, y = batch x = x.to(self.accelerator.device) y", "self.criterion(new_out, y) # this only has effect from step 1 onwards kdd_loss =", "def validation_batch(self, batch: Any, batch_index: int): # init losses and retrieve x, y", "loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def predict(self, test_dataloader: DataLoader, metrics: Dict[str, Metric], logger_exclude:", "timings.append(elapsed) return losses, timings def validation_epoch_end(self, val_losses: list, val_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val)", "that we don't have to deal with the batch size later if self.sample_batches", "from posix import listdir from typing import TYPE_CHECKING, Any, Dict, Iterable import numpy", "stage: TrainerStage = TrainerStage.train) -> None: result = dict() with torch.no_grad(): for name,", "not in self.metrics, \"stage already present in metrics\" self.metrics[stage.value] = metrics def step(self)", "batch in enumerate(val_tqdm): start = time.time() data = self.validation_batch(batch=batch, batch_index=i) loss = data[\"tot_loss\"]", "gather and update metrics # we group only the 'standard' images, not the", "y_pred, y_true) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return loss,", "i in range(images.size(0)): image = images[i].detach().cpu() true_mask = targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu() self.sample_content.append((image,", "name, 
item in kwargs.items(): LOG.debug(\"%8s: %s\", name, str(item)) def _store_samples(self, images: torch.Tensor, outputs:", "KDE = KD on encoder self.criterion_kdd = kdd_criterion self.criterion_kde = kde_criterion self.kdd_lambda =", "iteration on callbacks for the test set (e.g. display images) for callback in", "self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch: Any,", "# gather info images = self.accelerator.gather(x) y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(preds) #", "from saticl.logging import BaseLogger from saticl.logging.empty import EmptyLogger from saticl.losses.regularization import MultiModalScaling from", "= TrainerStage.train) -> None: for metric in self.metrics[stage.value].values(): metric.reset() def _log_metrics(self, stage: TrainerStage", "self.current_loss = loss.mean() loss_val = loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\",", "losses, timings def train_epoch_end(self, train_losses: dict, train_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name,", "import MultiModalScaling from saticl.metrics import Metric from saticl.models.encoders import MultiEncoder from saticl.tasks import", "listdir[BaseCallback] = list() def _prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None) -> None:", "\".join(log_strings)) # then log class-wise results in a single table if classwise: LOG.debug(\"Classwise:", "retrieve x, y x, y = batch seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0) #", "task, using AMP with 
self.accelerator.autocast(): new_out, new_features = self.model(x) seg_loss = self.criterion(new_out, y)", "# update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\": total, \"seg_loss\":", "self.accelerator.prepare(train_dataloader) if val_dataloader is not None: val_dataloader = self.accelerator.prepare(val_dataloader) # we need to", "LOG.info(\", \".join(log_strings)) # then log class-wise results in a single table if classwise:", "self.model(x) seg_loss = self.criterion(new_out, y) # forward and loss for KD if self.task.step", "from saticl.utils.common import get_logger, progressbar from saticl.utils.decorators import get_rank if TYPE_CHECKING: from saticl.trainer.callbacks", "= time.time() self.optimizer.zero_grad() data = self.train_batch(batch=batch) loss = data[\"tot_loss\"] # backward pass self.accelerator.backward(loss)", "None: for metric in self.metrics[stage.value].values(): metric.reset() def _log_metrics(self, stage: TrainerStage = TrainerStage.train, exclude:", "for curr_epoch in range(max_epochs): self.current_epoch = curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses, t_times", "num samples _, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval() for i, batch", "+ self.kdd_lambda * kdd_loss y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) # store samples", "old_out, old_features = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) total = seg_loss + self.kdd_lambda", "'standard' images, not the rotated ones y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true,", "= metric.compute() self.current_scores[stage.value] = result def _reset_metrics(self, stage: TrainerStage = TrainerStage.train) -> None:", "callback(self) except KeyboardInterrupt: 
LOG.info(\"[Epoch %2d] Interrupting training\", curr_epoch) break self.dispose_callbacks() return self def", "= None) -> None: self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader) if", "= 100): train_dataloader, val_dataloader = self._prepare(train_dataloader, val_dataloader) self.best_state_dict = self.model.state_dict() self.setup_callbacks() self.global_step =", "seg_loss + self.kdd_lambda * kdd_loss y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) # store", "self.is_main) timings = [] losses = defaultdict(list) with torch.no_grad(): self.model.eval() for i, batch", "isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd = MultiModalScaling() # optimizer, scheduler and logger, scaler for AMP", "for callback in self.callbacks: callback.dispose(self) def add_metrics(self, stage: TrainerStage, metrics: Dict[str, Metric]) ->", "logger_exclude or [] self.metrics[TrainerStage.test.value] = metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main)", "except KeyboardInterrupt: LOG.info(\"[Epoch %2d] Interrupting training\", curr_epoch) break self.dispose_callbacks() return self def test_batch(self,", "exclude or [] scores = self.current_scores[stage.value] classwise = dict() # first log scalars", "and self.sample_batches > 0: self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else: self.sample_batches = np.array([])", "torch.no_grad(): for metric in self.metrics[stage.value].values(): metric(y_true, y_pred) def _compute_metrics(self, stage: TrainerStage = TrainerStage.train)", "self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) # store results for name, val in data.items(): losses[name].append(val.mean().item())", "None: for i in 
range(images.size(0)): image = images[i].detach().cpu() true_mask = targets[i].detach().cpu() pred_mask =", "def train_epoch_end(self, train_losses: dict, train_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name, values in", "string once completed LOG.info(\", \".join(log_strings)) # then log class-wise results in a single", "in kwargs.items(): LOG.debug(\"%8s: %s\", name, str(item)) def _store_samples(self, images: torch.Tensor, outputs: torch.Tensor, targets:", "= dict() with torch.no_grad(): for name, metric in self.metrics[stage.value].items(): result[name] = metric.compute() self.current_scores[stage.value]", "= batch seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0) # forward and loss on main", "def _compute_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: result = dict() with torch.no_grad():", "loss_val = loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we do not log 'iter' versions for", "self._compute_metrics(stage=TrainerStage.train) for name, values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self):", "seg_loss + self.kdd_lambda * kdd_loss # gather and update metrics # we group", "instead of iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times) if val_dataloader is not None: self.validation_epoch_start() v_losses,", "Task, new_model: nn.Module, old_model: nn.Module, optimizer: Optimizer, scheduler: Any, old_classes: Dict[int, str], new_classes:", "self.old_classes = old_classes self.new_classes = new_classes self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items())) # internal", "batch seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0) # forward and loss on main task,", "self.metrics[stage.value] = metrics def step(self) -> None: 
self.global_step += 1 self.logger.step() def train_epoch_start(self):", "info loss_val = loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we do not log 'iter' versions", "list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def _debug_training(self, **kwargs: dict) -> None: LOG.debug(\"[Epoch %2d] -", "= kde_criterion self.kdd_lambda = kdd_lambda self.kde_lambda = kde_lambda self.multimodal = isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd", "= metrics def step(self) -> None: self.global_step += 1 self.logger.step() def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train)", "loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we do not log 'iter' versions, as for validation", "return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration on callbacks", "elapsed time elapsed = (time.time() - start) # store training info self.current_loss =", "# we swap an integer of num samples with a list of indices", "internal monitoring self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()} self.best_epoch = None self.best_score =", "torch.no_grad(): self.model.eval() for i, batch in enumerate(val_tqdm): start = time.time() data = self.validation_batch(batch=batch,", "# store samples for visualization, if present. 
Requires a plot callback # better", "task.step == 0 or old_model is not None or stage == \"test\", \"ICL", "> 0: old_out, old_features = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) total = seg_loss", "self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def _debug_training(self, **kwargs: dict) -> None: LOG.debug(\"[Epoch %2d] - iteration:", "not None: val_dataloader = self.accelerator.prepare(val_dataloader) # we need to do this here, because", "if train_metrics is not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics is not None: self.add_metrics(stage=TrainerStage.val,", "- start) # store training info self.current_loss = loss.mean() loss_val = loss.mean().item() train_tqdm.set_postfix({\"loss\":", "and loss on main task, using AMP with self.accelerator.autocast(): new_out, new_features = self.model(x)", "Trainer: self.callbacks.append(callback) return self def setup_callbacks(self) -> None: for callback in self.callbacks: callback.setup(self)", "or [] self.metrics[TrainerStage.test.value] = metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main) losses,", "y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) # debug if active if self.debug: self._debug_training(x=x.dtype,", "ones y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) # debug if", "_ = self.model(x) seg_loss = self.criterion(new_out, y) # this only has effect from", "replace=False) else: self.sample_batches = np.array([]) return train_dataloader, val_dataloader def _update_metrics(self, y_true: torch.Tensor, y_pred:", "update metrics # we group only the 'standard' images, not the rotated ones", "loader, pass as val loader to store num samples _, 
test_dataloader = self._prepare(train_dataloader=None,", "predict(self, test_dataloader: DataLoader, metrics: Dict[str, Metric], logger_exclude: Iterable[str] = None, return_preds: bool =", "y_pred = self.accelerator.gather(preds) # store samples for visualization, if present. Requires a plot", "timings = [] losses = defaultdict(list) with torch.no_grad(): self.model.eval() for i, batch in", "-> None: for i in range(images.size(0)): image = images[i].detach().cpu() true_mask = targets[i].detach().cpu() pred_mask", "kdd_loss=kdd_loss) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def train_epoch(self, epoch: int, train_dataloader:", "self.accelerator.gather(preds) # store samples for visualization, if present. Requires a plot callback #", "in self.metrics[stage.value].values(): metric(y_true, y_pred) def _compute_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: result", "the full string once completed LOG.info(\", \".join(log_strings)) # then log class-wise results in", "= KD on encoder self.criterion_kdd = kdd_criterion self.criterion_kde = kde_criterion self.kdd_lambda = kdd_lambda", "get_logger, progressbar from saticl.utils.decorators import get_rank if TYPE_CHECKING: from saticl.trainer.callbacks import BaseCallback LOG", "to deal with the batch size later if self.sample_batches is not None and", "= np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else: self.sample_batches = np.array([]) return train_dataloader, val_dataloader def _update_metrics(self,", "# not the best place to call it, but it's best to call", "import TYPE_CHECKING, Any, Dict, Iterable import numpy as np import torch from accelerate", "have to deal with the batch size later # also, we take just", "losses total = seg_loss + self.kdd_lambda * kdd_loss # gather and update metrics", "total = seg_loss + self.kdd_lambda * kdd_loss y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out)", "info 
self.current_loss = loss.mean() loss_val = loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"])", "if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration on", "curr_epoch in range(max_epochs): self.current_epoch = curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses, t_times =", "posix import listdir from typing import TYPE_CHECKING, Any, Dict, Iterable import numpy as", "class-wise results in a single table if classwise: LOG.debug(\"Classwise: %s\", str(classwise)) header =", "val_dataloader = self.accelerator.prepare(val_dataloader) # we need to do this here, because of the", "to call it every epoch instead of iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times) if val_dataloader", "if self.task.step > 0: old_out, old_features = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) total", "y_pred=y_pred, stage=TrainerStage.test) return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def predict(self, test_dataloader: DataLoader, metrics:", "any self.metrics = dict() if train_metrics is not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics", "the test set (e.g. 
display images) for callback in self.callbacks: callback(self) return losses,", "def predict(self, test_dataloader: DataLoader, metrics: Dict[str, Metric], logger_exclude: Iterable[str] = None, return_preds: bool", "= x.to(self.accelerator.device) y = y.to(self.accelerator.device) # forward and loss on main task, using", "[], [], [] # prepare model and loader, pass as val loader to", "None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL information for loggers self.task = task self.old_classes =", "loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we do not log 'iter' versions for loss and", "torch.no_grad(): self.model.eval() for i, batch in enumerate(test_tqdm): start = time.time() loss, data =", "= -1 self.current_loss = None self.global_step = -1 # internal monitoring self.current_scores =", "do not log 'iter' versions, as for validation losses.append(loss_value) timings.append(elapsed) if return_preds: results.append(data)", "self.sample_content.append((image, true_mask, pred_mask)) def add_callback(self, callback: BaseCallback) -> Trainer: self.callbacks.append(callback) return self def", "continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log the full string once completed LOG.info(\",", "saticl.utils.decorators import get_rank if TYPE_CHECKING: from saticl.trainer.callbacks import BaseCallback LOG = get_logger(__name__) class", "self.current_loss = None self.global_step = -1 # internal monitoring self.current_scores = {TrainerStage.train.value: dict(),", "train_metrics: Dict[str, Metric] = None, val_metrics: Dict[str, Metric] = None, logger: BaseLogger =", "# internal monitoring self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()} self.best_epoch = None self.best_score", "old_model: nn.Module, optimizer: Optimizer, scheduler: Any, old_classes: Dict[int, str], new_classes: Dict[int, str], 
seg_criterion:", "for KD if self.task.step > 0: old_out, old_features = self.old_model(x) kdd_loss = self.criterion_kdd(new_out,", "and retrieve x, y x, y = batch # forward and loss on", "= self.model(x) loss = self.criterion(preds, y) # gather info images = self.accelerator.gather(x) y_true", "== 0 self.current_epoch = -1 self.current_loss = None self.global_step = -1 # internal", "dict(), TrainerStage.val.value: dict()} self.best_epoch = None self.best_score = None self.best_state_dict = None self.sample_batches", "new_classes self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items())) # internal state self.rank = get_rank() self.is_main", "np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch: Any, batch_index: int): #", "from __future__ import annotations import time from collections import OrderedDict, defaultdict from enum", "metrics=train_metrics) if val_metrics is not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL information for loggers", "import time from collections import OrderedDict, defaultdict from enum import Enum from posix", "present in metrics\" self.metrics[stage.value] = metrics def step(self) -> None: self.global_step += 1", "[] losses = defaultdict(list) with torch.no_grad(): self.model.eval() for i, batch in enumerate(val_tqdm): start", "self.sample_batches: images = self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1]) # update metrics and return losses", "scheduler: Any, old_classes: Dict[int, str], new_classes: Dict[int, str], seg_criterion: nn.Module, kdd_criterion: nn.Module, kde_criterion:", "= None, stage: str = \"train\", debug: bool = False) -> None: assert", "setup_callbacks(self) -> None: for callback in self.callbacks: callback.setup(self) def dispose_callbacks(self) -> None: for", "in self.callbacks: 
callback.dispose(self) def add_metrics(self, stage: TrainerStage, metrics: Dict[str, Metric]) -> Trainer: assert", "logger: BaseLogger = None, samples: int = None, stage: str = \"train\", debug:", "BaseLogger from saticl.logging.empty import EmptyLogger from saticl.losses.regularization import MultiModalScaling from saticl.metrics import Metric", "__future__ import annotations import time from collections import OrderedDict, defaultdict from enum import", "self.accelerator.gather(x) y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(preds) # store samples for visualization, if", "-> None: LOG.debug(\"[Epoch %2d] - iteration: %d\", self.current_epoch, self.global_step) for name, item in", "LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not the best", "= None) -> None: log_strings = [] exclude = exclude or [] scores", "progressbar from saticl.utils.decorators import get_rank if TYPE_CHECKING: from saticl.trainer.callbacks import BaseCallback LOG =", "# forward and loss on main task, using AMP with self.accelerator.autocast(): new_out, new_features", "self.accelerator.autocast(): new_out, new_features = self.model(x) seg_loss = self.criterion(new_out, y) # forward and loss", "self.current_epoch, self.global_step) for name, item in kwargs.items(): LOG.debug(\"%8s: %s\", name, str(item)) def _store_samples(self,", "total = seg_loss + self.kdd_lambda * kdd_loss # gather and update metrics #", "self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) # debug if active if self.debug:", "in self.metrics, \"stage already present in metrics\" self.metrics[stage.value] = metrics def step(self) ->", "callbacks for the test set (e.g. 
display images) for callback in self.callbacks: callback(self)", "classwise[metric_name] = score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log the full string", "# forward and loss on segmentation task with self.accelerator.autocast(): new_out, _ = self.model(x)", "seg_loss, \"kdd_loss\": kdd_loss} def train_epoch(self, epoch: int, train_dataloader: DataLoader) -> Any: timings =", "= seg_criterion # knowledge distillation: KDD = KD on decoder, KDE = KD", "from accelerate import Accelerator from torch import nn from torch.optim import Optimizer from", "= self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) # debug if active if", "return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def predict(self,", "> 0: old_out, _ = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) # sum up", "= y.to(self.accelerator.device) # forward and loss on main task, using AMP with self.accelerator.autocast():", "model and loader, pass as val loader to store num samples _, test_dataloader", "images: torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor) -> None: for i in range(images.size(0)): image", "not the best place to call it, but it's best to call it", "= samples self.sample_content = list() self.callbacks: listdir[BaseCallback] = list() def _prepare(self, train_dataloader: DataLoader,", "None self.best_state_dict = None self.sample_batches = samples self.sample_content = list() self.callbacks: listdir[BaseCallback] =", "train_dataloader: DataLoader, val_dataloader: DataLoader = None) -> None: self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer)", "with self.accelerator.autocast(): new_out, _ = self.model(x) seg_loss = 
self.criterion(new_out, y) # this only", "= optimizer self.scheduler = scheduler self.logger = logger or EmptyLogger() # setup metrics,", "= self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) total = seg_loss + self.kdd_lambda * kdd_loss", "- start) # gather info loss_val = loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we do", "%s\", name, str(item)) def _store_samples(self, images: torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor) -> None:", "False) -> None: assert task.step == 0 or old_model is not None or", "table if classwise: LOG.debug(\"Classwise: %s\", str(classwise)) header = list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def", "list() self.callbacks: listdir[BaseCallback] = list() def _prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None)", "= 0 for curr_epoch in range(max_epochs): self.current_epoch = curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start()", "batch: Any, batch_index: int): # init losses and retrieve x, y x, y", "= logger_exclude or [] self.metrics[TrainerStage.test.value] = metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not", "and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss}", "data = self.validation_batch(batch=batch, batch_index=i) loss = data[\"tot_loss\"] elapsed = (time.time() - start) #", "self.sample_batches = np.array([]) return train_dataloader, val_dataloader def _update_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor, stage:", "None, logger: BaseLogger = None, samples: int = None, stage: str = \"train\",", "need to do this here, because of the prepare # we swap an", "def validation_epoch_end(self, val_losses: list, val_times: list): 
with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name, values in", "curr_epoch) break self.dispose_callbacks() return self def test_batch(self, batch: Any, batch_index: int): x, y", "old_model is not None or stage == \"test\", \"ICL steps require the old", "0 self.current_epoch = -1 self.current_loss = None self.global_step = -1 # internal monitoring", "= accelerator self.debug = debug self.model = new_model self.old_model = old_model self.criterion =", "is not None: self.validation_epoch_start() v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times) for callback", "# log the full string once completed LOG.info(\", \".join(log_strings)) # then log class-wise", "and loader, pass as val loader to store num samples _, test_dataloader =", "Any, batch_index: int): # init losses and retrieve x, y x, y =", "LOG = get_logger(__name__) class TrainerStage(str, Enum): train = \"train\" val = \"val\" test", "add_metrics(self, stage: TrainerStage, metrics: Dict[str, Metric]) -> Trainer: assert stage.value not in self.metrics,", "# internal state self.rank = get_rank() self.is_main = self.rank == 0 self.current_epoch =", "versions, as for validation losses.append(loss_value) timings.append(elapsed) if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings))", "loss = self.criterion(preds, y) # gather info images = self.accelerator.gather(x) y_true = self.accelerator.gather(y)", "self.criterion_kdd(new_out, old_out) # sum up losses total = seg_loss + self.kdd_lambda * kdd_loss", "old_out, _ = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) # sum up losses total", "EmptyLogger() # setup metrics, if any self.metrics = dict() if train_metrics is not", "= [] exclude = exclude or [] scores = self.current_scores[stage.value] classwise = dict()", "true_mask = 
targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask)) def add_callback(self, callback: BaseCallback)", "import Optimizer from torch.utils.data import DataLoader from saticl.logging import BaseLogger from saticl.logging.empty import", "exclude: continue if score.ndim > 0: # store for later classwise[metric_name] = score", "in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return losses, timings def validation_epoch_end(self, val_losses: list, val_times: list):", "for i, batch in enumerate(val_tqdm): start = time.time() data = self.validation_batch(batch=batch, batch_index=i) loss", "group only the 'standard' images, not the rotated ones y_true = self.accelerator.gather(y) y_pred", "elapsed = (time.time() - start) # gather info loss_val = loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"})", "LOG.debug(\"%8s: %s\", name, str(item)) def _store_samples(self, images: torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor) ->", "-1 # internal monitoring self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()} self.best_epoch = None", "= result def _reset_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: for metric in", "kdd_loss} def train_epoch(self, epoch: int, train_dataloader: DataLoader) -> Any: timings = [] losses", "None, return_preds: bool = False): logger_exclude = logger_exclude or [] self.metrics[TrainerStage.test.value] = metrics", "np.array([]) return train_dataloader, val_dataloader def _update_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor, stage: TrainerStage =", "-> None: self.global_step += 1 self.logger.step() def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch: Any)", "same length if self.sample_batches is not None and self.sample_batches > 0: self.sample_batches =", "import Metric from saticl.models.encoders import 
MultiEncoder from saticl.tasks import Task from saticl.utils.common import", "= self.test_batch(batch=batch, batch_index=i) elapsed = (time.time() - start) loss_value = loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"})", "= self.accelerator.gather(x) y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(preds) # store samples for visualization,", "a single table if classwise: LOG.debug(\"Classwise: %s\", str(classwise)) header = list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header,", "from saticl.losses.regularization import MultiModalScaling from saticl.metrics import Metric from saticl.models.encoders import MultiEncoder from", "= \"train\", debug: bool = False) -> None: assert task.step == 0 or", "callback in self.callbacks: callback.setup(self) def dispose_callbacks(self) -> None: for callback in self.callbacks: callback.dispose(self)", "fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None, max_epochs: int = 100): train_dataloader, val_dataloader", "saticl.losses.regularization import MultiModalScaling from saticl.metrics import Metric from saticl.models.encoders import MultiEncoder from saticl.tasks", "= \"val\" test = \"test\" class Trainer: def __init__(self, accelerator: Accelerator, task: Task,", "import BaseCallback LOG = get_logger(__name__) class TrainerStage(str, Enum): train = \"train\" val =", "kdd_loss} def validation_epoch(self, epoch: int, val_dataloader: DataLoader) -> Any: val_tqdm = progressbar(val_dataloader, epoch=epoch,", "= data[\"tot_loss\"] # backward pass self.accelerator.backward(loss) self.optimizer.step() # measure elapsed time elapsed =", "seg_criterion # knowledge distillation: KDD = KD on decoder, KDE = KD on", "dict) -> None: LOG.debug(\"[Epoch %2d] - iteration: %d\", self.current_epoch, self.global_step) for name, item", "stage.value not in self.metrics, \"stage already present in metrics\" self.metrics[stage.value] = metrics 
def", "self.task.step > 0: old_out, old_features = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) total =", "is not None and batch_index in self.sample_batches: images = self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1])", "KD on encoder self.criterion_kdd = kdd_criterion self.criterion_kde = kde_criterion self.kdd_lambda = kdd_lambda self.kde_lambda", "is not None and self.sample_batches > 0: self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else:", "# we do not log 'iter' versions for loss and timings, since we", "timings def validation_epoch_end(self, val_losses: list, val_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name, values", "None self.global_step = -1 # internal monitoring self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()}", "np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None, max_epochs: int =", "= loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we do not log 'iter' versions for loss", "def add_callback(self, callback: BaseCallback) -> Trainer: self.callbacks.append(callback) return self def setup_callbacks(self) -> None:", "elapsed) # store results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) # step", "# optimizer, scheduler and logger, scaler for AMP self.optimizer = optimizer self.scheduler =", "'iter' versions, as for validation losses.append(loss_value) timings.append(elapsed) if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\",", "= dict() # first log scalars for metric_name, score in scores.items(): if metric_name", "seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def 
train_epoch(self, epoch: int,", "losses[name].append(val.mean().item()) timings.append(elapsed) return losses, timings def validation_epoch_end(self, val_losses: list, val_times: list): with torch.no_grad():", "train_batch(self, batch: Any) -> torch.Tensor: # init losses and retrieve x, y x,", "kdd_loss = torch.tensor(0.0), torch.tensor(0.0) # forward and loss on main task, using AMP", "images, not the rotated ones y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred,", "self.accelerator.autocast(): new_out, _ = self.model(x) seg_loss = self.criterion(new_out, y) # this only has", "import listdir from typing import TYPE_CHECKING, Any, Dict, Iterable import numpy as np", "take just the first one, a lil bit hardcoded i know if self.sample_batches", "def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch: Any) -> torch.Tensor: # init losses and", "y_pred=y_pred, stage=TrainerStage.train) # debug if active if self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss)", "= exclude or [] scores = self.current_scores[stage.value] classwise = dict() # first log", "callback.setup(self) def dispose_callbacks(self) -> None: for callback in self.callbacks: callback.dispose(self) def add_metrics(self, stage:", "and logger, scaler for AMP self.optimizer = optimizer self.scheduler = scheduler self.logger =", "or [] scores = self.current_scores[stage.value] classwise = dict() # first log scalars for", "total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def train_epoch(self, epoch: int, train_dataloader: DataLoader) -> Any:", "torch.Tensor, targets: torch.Tensor) -> None: for i in range(images.size(0)): image = images[i].detach().cpu() true_mask", "add_callback(self, callback: BaseCallback) -> Trainer: self.callbacks.append(callback) return self def setup_callbacks(self) -> None: for", "None and 
batch_index in self.sample_batches: images = self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1]) # update", "self.optimizer.step() # measure elapsed time elapsed = (time.time() - start) # store training", "KeyboardInterrupt: LOG.info(\"[Epoch %2d] Interrupting training\", curr_epoch) break self.dispose_callbacks() return self def test_batch(self, batch:", "with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name, values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val)", "preds, _ = self.model(x) loss = self.criterion(preds, y) # gather info images =", "self.test_batch(batch=batch, batch_index=i) elapsed = (time.time() - start) loss_value = loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) #", "and loss for KD if self.task.step > 0: old_out, old_features = self.old_model(x) kdd_loss", "= progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main) timings = [] losses = defaultdict(list) with", "if val_metrics is not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL information for loggers self.task", "None: with torch.no_grad(): for metric in self.metrics[stage.value].values(): metric(y_true, y_pred) def _compute_metrics(self, stage: TrainerStage", "np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch: Any, batch_index:", "self.is_main) losses, timings, results = [], [], [] # prepare model and loader,", "self.callbacks: callback.setup(self) def dispose_callbacks(self) -> None: for callback in self.callbacks: callback.dispose(self) def add_metrics(self,", "# forward and loss for KD if self.task.step > 0: 
old_out, old_features =", "Dict[str, Metric] = None, val_metrics: Dict[str, Metric] = None, logger: BaseLogger = None,", "loader to store num samples _, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval()", "self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main) losses, timings, results = [], [],", "validation_epoch_end(self, val_losses: list, val_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name, values in val_losses.items():", "main task, using AMP with self.accelerator.autocast(): preds, _ = self.model(x) loss = self.criterion(preds,", "test_dataloader: DataLoader, metrics: Dict[str, Metric], logger_exclude: Iterable[str] = None, return_preds: bool = False):", "for name, item in kwargs.items(): LOG.debug(\"%8s: %s\", name, str(item)) def _store_samples(self, images: torch.Tensor,", "information for loggers self.task = task self.old_classes = old_classes self.new_classes = new_classes self.all_classes", "None: LOG.debug(\"[Epoch %2d] - iteration: %d\", self.current_epoch, self.global_step) for name, item in kwargs.items():", "val_dataloader is not None: val_dataloader = self.accelerator.prepare(val_dataloader) # we need to do this", "y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) # store samples for visualization, if present.", "result def _reset_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: for metric in self.metrics[stage.value].values():", "- iteration: %d\", self.current_epoch, self.global_step) for name, item in kwargs.items(): LOG.debug(\"%8s: %s\", name,", "store num samples _, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval() for i,", "stage: TrainerStage, metrics: Dict[str, Metric]) -> Trainer: assert stage.value not in 
self.metrics, \"stage", "t_times) if val_dataloader is not None: self.validation_epoch_start() v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses,", "stage=TrainerStage.train) # debug if active if self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return", "stage=TrainerStage.val.value, disable=not self.is_main) timings = [] losses = defaultdict(list) with torch.no_grad(): self.model.eval() for", "dim=1).cpu()) def predict(self, test_dataloader: DataLoader, metrics: Dict[str, Metric], logger_exclude: Iterable[str] = None, return_preds:", "collections import OrderedDict, defaultdict from enum import Enum from posix import listdir from", "headers=header, results=classwise) def _debug_training(self, **kwargs: dict) -> None: LOG.debug(\"[Epoch %2d] - iteration: %d\",", "return self def test_batch(self, batch: Any, batch_index: int): x, y = batch x", "None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics is not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL information", "old_out) total = seg_loss + self.kdd_lambda * kdd_loss y_true = self.accelerator.gather(y) y_pred =", "self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader) if val_dataloader is not None: val_dataloader = self.accelerator.prepare(val_dataloader) #", "kdd_criterion self.criterion_kde = kde_criterion self.kdd_lambda = kdd_lambda self.kde_lambda = kde_lambda self.multimodal = isinstance(new_model.encoder,", "'iter' versions for loss and timings, since we do not advance the logger", "score.ndim > 0: # store for later classwise[metric_name] = score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score)", "Iterable import numpy as np import torch from accelerate import Accelerator from torch", "using AMP with self.accelerator.autocast(): preds, _ = self.model(x) 
loss = self.criterion(preds, y) #", "i, batch in enumerate(test_tqdm): start = time.time() loss, data = self.test_batch(batch=batch, batch_index=i) elapsed", "stage=TrainerStage.test) return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def predict(self, test_dataloader: DataLoader, metrics: Dict[str,", "def __init__(self, accelerator: Accelerator, task: Task, new_model: nn.Module, old_model: nn.Module, optimizer: Optimizer, scheduler:", "model for KD\" self.accelerator = accelerator self.debug = debug self.model = new_model self.old_model", "timings.append(elapsed) # step the logger self.step() return losses, timings def train_epoch_end(self, train_losses: dict,", "== 0 or old_model is not None or stage == \"test\", \"ICL steps", "_ = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) # sum up losses total =", "y = batch # forward and loss on segmentation task with self.accelerator.autocast(): new_out,", "do this here, because of the prepare # we swap an integer of", "the 'standard' images, not the rotated ones y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out)", "in self.sample_batches: images = self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1]) # update metrics and return", "train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) # store results for name,", "dict() with torch.no_grad(): for name, metric in self.metrics[stage.value].items(): result[name] = metric.compute() self.current_scores[stage.value] =", "# prepare model and loader, pass as val loader to store num samples", "self.debug = debug self.model = new_model self.old_model = old_model self.criterion = seg_criterion #", "self.global_step += 1 self.logger.step() def train_epoch_start(self): 
self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch: Any) -> torch.Tensor:", "result[name] = metric.compute() self.current_scores[stage.value] = result def _reset_metrics(self, stage: TrainerStage = TrainerStage.train) ->", "visualization, if present. Requires a plot callback # better to unpack now, so", "data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) # step the logger self.step() return losses, timings def train_epoch_end(self,", "= OrderedDict(list(old_classes.items()) + list(new_classes.items())) # internal state self.rank = get_rank() self.is_main = self.rank", "timings.append(elapsed) if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration", "MultiModalScaling() # optimizer, scheduler and logger, scaler for AMP self.optimizer = optimizer self.scheduler", "train_dataloader, val_dataloader = self._prepare(train_dataloader, val_dataloader) self.best_state_dict = self.model.state_dict() self.setup_callbacks() self.global_step = 0 for", "-> Trainer: self.callbacks.append(callback) return self def setup_callbacks(self) -> None: for callback in self.callbacks:", "have to deal with the batch size later if self.sample_batches is not None", "= seg_loss + self.kdd_lambda * kdd_loss # gather and update metrics # we", "not None and batch_index in self.sample_batches: images = self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1]) #", "= list() def _prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None) -> None: self.model,", "self.sample_batches = samples self.sample_content = list() self.callbacks: listdir[BaseCallback] = list() def _prepare(self, train_dataloader:", "results in a single table if classwise: LOG.debug(\"Classwise: %s\", str(classwise)) 
header = list(self.all_classes.values())", "def _store_samples(self, images: torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor) -> None: for i in", "it's best to call it every epoch instead of iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times)", "self.optimizer.zero_grad() data = self.train_batch(batch=batch) loss = data[\"tot_loss\"] # backward pass self.accelerator.backward(loss) self.optimizer.step() #", "so that we don't have to deal with the batch size later if", "self.metrics, \"stage already present in metrics\" self.metrics[stage.value] = metrics def step(self) -> None:", "TYPE_CHECKING: from saticl.trainer.callbacks import BaseCallback LOG = get_logger(__name__) class TrainerStage(str, Enum): train =", "_log_metrics(self, stage: TrainerStage = TrainerStage.train, exclude: Iterable[str] = None) -> None: log_strings =", "DataLoader = None, max_epochs: int = 100): train_dataloader, val_dataloader = self._prepare(train_dataloader, val_dataloader) self.best_state_dict", "logger step # during validation (also, it's kind of useless) # store results", "in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) # step the logger self.step() return losses, timings def", "to deal with the batch size later # also, we take just the", "metrics: Dict[str, Metric]) -> Trainer: assert stage.value not in self.metrics, \"stage already present", "break self.dispose_callbacks() return self def test_batch(self, batch: Any, batch_index: int): x, y =", "= self._prepare(train_dataloader, val_dataloader) self.best_state_dict = self.model.state_dict() self.setup_callbacks() self.global_step = 0 for curr_epoch in", "train_dataloader, val_dataloader def _update_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor, stage: TrainerStage = TrainerStage.train) ->", "Metric from saticl.models.encoders import MultiEncoder from saticl.tasks import Task from saticl.utils.common import get_logger,", "kwargs.items(): LOG.debug(\"%8s: %s\", 
name, str(item)) def _store_samples(self, images: torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor)", "the first one, a lil bit hardcoded i know if self.sample_batches is not", "call it every epoch instead of iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times) if val_dataloader is", "timings = [] losses = defaultdict(list) train_tqdm = progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main)", "self.dispose_callbacks() return self def test_batch(self, batch: Any, batch_index: int): x, y = batch", "-> None: for metric in self.metrics[stage.value].values(): metric.reset() def _log_metrics(self, stage: TrainerStage = TrainerStage.train,", "y_true: torch.Tensor, y_pred: torch.Tensor, stage: TrainerStage = TrainerStage.train) -> None: with torch.no_grad(): for", "from saticl.tasks import Task from saticl.utils.common import get_logger, progressbar from saticl.utils.decorators import get_rank", "= self.model.state_dict() self.setup_callbacks() self.global_step = 0 for curr_epoch in range(max_epochs): self.current_epoch = curr_epoch", "_prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None) -> None: self.model, self.optimizer = self.accelerator.prepare(self.model,", "_store_samples(self, images: torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor) -> None: for i in range(images.size(0)):", "accelerator: Accelerator, task: Task, new_model: nn.Module, old_model: nn.Module, optimizer: Optimizer, scheduler: Any, old_classes:", "kde_lambda self.multimodal = isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd = MultiModalScaling() # optimizer, scheduler and logger,", "str], new_classes: Dict[int, str], seg_criterion: nn.Module, kdd_criterion: nn.Module, kde_criterion: nn.Module = None, kdd_lambda:", "str(item)) def _store_samples(self, images: torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor) -> None: for i", "batch size later if self.sample_batches is 
not None and batch_index in self.sample_batches: self._store_samples(images,", "None: self.global_step += 1 self.logger.step() def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch: Any) ->", "to store num samples _, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval() for", "(also, it's kind of useless) # store results for name, val in data.items():", "only the 'standard' images, not the rotated ones y_true = self.accelerator.gather(y) y_pred =", "val_dataloader: DataLoader = None) -> None: self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) train_dataloader =", "self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch: Any, batch_index: int): # init losses and retrieve x,", "for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) # step the logger self.step() return", "losses and retrieve x, y x, y = batch seg_loss, kdd_loss = torch.tensor(0.0),", "self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def", "data[\"tot_loss\"] # backward pass self.accelerator.backward(loss) self.optimizer.step() # measure elapsed time elapsed = (time.time()", "header = list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def _debug_training(self, **kwargs: dict) -> None: LOG.debug(\"[Epoch", "val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times) for callback in self.callbacks: callback(self) except KeyboardInterrupt: LOG.info(\"[Epoch %2d] Interrupting", "= None self.sample_batches = samples self.sample_content = list() self.callbacks: listdir[BaseCallback] = list() def", "Trainer: assert stage.value not in self.metrics, \"stage already present in metrics\" 
self.metrics[stage.value] =", "TrainerStage.val.value: dict()} self.best_epoch = None self.best_score = None self.best_state_dict = None self.sample_batches =", "disable=not self.is_main) self.model.train() for batch in train_tqdm: start = time.time() self.optimizer.zero_grad() data =", "LOG.debug(\"[Epoch %2d] - iteration: %d\", self.current_epoch, self.global_step) for name, item in kwargs.items(): LOG.debug(\"%8s:", "loss = data[\"tot_loss\"] # backward pass self.accelerator.backward(loss) self.optimizer.step() # measure elapsed time elapsed", "seg_loss = self.criterion(new_out, y) # forward and loss for KD if self.task.step >", "self.metrics[stage.value].values(): metric.reset() def _log_metrics(self, stage: TrainerStage = TrainerStage.train, exclude: Iterable[str] = None) ->", "step(self) -> None: self.global_step += 1 self.logger.step() def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch:", "y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def validation_epoch(self, epoch: int,", "nn.Module, old_model: nn.Module, optimizer: Optimizer, scheduler: Any, old_classes: Dict[int, str], new_classes: Dict[int, str],", "%d\", self.current_epoch, self.global_step) for name, item in kwargs.items(): LOG.debug(\"%8s: %s\", name, str(item)) def", "on encoder self.criterion_kdd = kdd_criterion self.criterion_kde = kde_criterion self.kdd_lambda = kdd_lambda self.kde_lambda =", "in train_tqdm: start = time.time() self.optimizer.zero_grad() data = self.train_batch(batch=batch) loss = data[\"tot_loss\"] #", "self._prepare(train_dataloader, val_dataloader) self.best_state_dict = self.model.state_dict() self.setup_callbacks() self.global_step = 0 for curr_epoch in range(max_epochs):", "= self.accelerator.prepare(val_dataloader) # we need to do this here, because of the prepare", "kdd_loss = torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if 
self.task.step > 0: old_out, _ = self.old_model(x)", "deal with the batch size later if self.sample_batches is not None and batch_index", "metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main) losses, timings, results = [],", "num samples with a list of indices with same length if self.sample_batches is", "None: result = dict() with torch.no_grad(): for name, metric in self.metrics[stage.value].items(): result[name] =", "info images = self.accelerator.gather(x) y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(preds) # store samples", "debug self.model = new_model self.old_model = old_model self.criterion = seg_criterion # knowledge distillation:", "None: for callback in self.callbacks: callback.setup(self) def dispose_callbacks(self) -> None: for callback in", "epoch: int, train_dataloader: DataLoader) -> Any: timings = [] losses = defaultdict(list) train_tqdm", "is not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL information for loggers self.task = task", "data = self.train_batch(batch=batch) loss = data[\"tot_loss\"] # backward pass self.accelerator.backward(loss) self.optimizer.step() # measure", "import Accelerator from torch import nn from torch.optim import Optimizer from torch.utils.data import", "%2d] - iteration: %d\", self.current_epoch, self.global_step) for name, item in kwargs.items(): LOG.debug(\"%8s: %s\",", "it, but it's best to call it every epoch instead of iteration self.scheduler.step()", "= self.model(x) seg_loss = self.criterion(new_out, y) # this only has effect from step", "callback # better to unpack now, so that we don't have to deal", "i know if self.sample_batches is not None and batch_index in self.sample_batches: images =", "stage: TrainerStage = TrainerStage.train, exclude: Iterable[str] = None) -> None: log_strings = []", "self.train_epoch_end(t_losses, t_times) if val_dataloader is not 
None: self.validation_epoch_start() v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader)", "test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we do not log 'iter' versions, as for validation losses.append(loss_value)", "Dict[str, Metric]) -> Trainer: assert stage.value not in self.metrics, \"stage already present in", "list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name, values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times))", "saticl.tasks import Task from saticl.utils.common import get_logger, progressbar from saticl.utils.decorators import get_rank if", "_debug_training(self, **kwargs: dict) -> None: LOG.debug(\"[Epoch %2d] - iteration: %d\", self.current_epoch, self.global_step) for", "self.criterion_mmd = MultiModalScaling() # optimizer, scheduler and logger, scaler for AMP self.optimizer =", "from collections import OrderedDict, defaultdict from enum import Enum from posix import listdir", "self.setup_callbacks() self.global_step = 0 for curr_epoch in range(max_epochs): self.current_epoch = curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\")", "up losses total = seg_loss + self.kdd_lambda * kdd_loss # gather and update", "stage: str = \"train\", debug: bool = False) -> None: assert task.step ==", "None: self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader) if val_dataloader is not", "= scheduler self.logger = logger or EmptyLogger() # setup metrics, if any self.metrics", "None, val_metrics: Dict[str, Metric] = None, logger: BaseLogger = None, samples: int =", "None) -> None: log_strings = [] exclude = exclude or [] scores =", "Enum from posix import listdir from typing import TYPE_CHECKING, Any, Dict, Iterable import", "y = batch seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0) # 
forward and loss on", "is not None: val_dataloader = self.accelerator.prepare(val_dataloader) # we need to do this here,", "batch: Any, batch_index: int): x, y = batch x = x.to(self.accelerator.device) y =", "results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return losses, timings def validation_epoch_end(self,", "old model for KD\" self.accelerator = accelerator self.debug = debug self.model = new_model", "not advance the logger step # during validation (also, it's kind of useless)", "if val_dataloader is not None: self.validation_epoch_start() v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times)", "in scores.items(): if metric_name in exclude: continue if score.ndim > 0: # store", "encoder self.criterion_kdd = kdd_criterion self.criterion_kde = kde_criterion self.kdd_lambda = kdd_lambda self.kde_lambda = kde_lambda", "integer of num samples with a list of indices with same length if", "metric_name, score in scores.items(): if metric_name in exclude: continue if score.ndim > 0:", "def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch: Any, batch_index: int): # init losses", "import get_rank if TYPE_CHECKING: from saticl.trainer.callbacks import BaseCallback LOG = get_logger(__name__) class TrainerStage(str,", "str(classwise)) header = list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def _debug_training(self, **kwargs: dict) -> None:", "= self.current_scores[stage.value] classwise = dict() # first log scalars for metric_name, score in", "list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name, values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", 
np.mean(train_times))", "# knowledge distillation: KDD = KD on decoder, KDE = KD on encoder", "+ self.kdd_lambda * kdd_loss # gather and update metrics # we group only", "logger_exclude: Iterable[str] = None, return_preds: bool = False): logger_exclude = logger_exclude or []", "= time.time() data = self.validation_batch(batch=batch, batch_index=i) loss = data[\"tot_loss\"] elapsed = (time.time() -", "torch import nn from torch.optim import Optimizer from torch.utils.data import DataLoader from saticl.logging", "optimizer, scheduler and logger, scaler for AMP self.optimizer = optimizer self.scheduler = scheduler", "accelerate import Accelerator from torch import nn from torch.optim import Optimizer from torch.utils.data", "exclude = exclude or [] scores = self.current_scores[stage.value] classwise = dict() # first", "stage=TrainerStage.train.value, disable=not self.is_main) self.model.train() for batch in train_tqdm: start = time.time() self.optimizer.zero_grad() data", "OrderedDict(list(old_classes.items()) + list(new_classes.items())) # internal state self.rank = get_rank() self.is_main = self.rank ==", "y) # forward and loss for KD if self.task.step > 0: old_out, old_features", "{\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def train_epoch(self, epoch: int, train_dataloader: DataLoader) ->", "Dict[str, Metric] = None, logger: BaseLogger = None, samples: int = None, stage:", "float = 0.0, kde_lambda: float = 0.0, train_metrics: Dict[str, Metric] = None, val_metrics:", "self.criterion(new_out, y) # forward and loss for KD if self.task.step > 0: old_out,", "[] losses = defaultdict(list) train_tqdm = progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main) self.model.train() for", "kde_criterion: nn.Module = None, kdd_lambda: float = 0.0, kde_lambda: float = 0.0, train_metrics:", "None, max_epochs: int = 100): train_dataloader, val_dataloader = self._prepare(train_dataloader, val_dataloader) 
self.best_state_dict = self.model.state_dict()", "kind of useless) # store results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed)", "= self.criterion(new_out, y) # forward and loss for KD if self.task.step > 0:", "= None self.best_state_dict = None self.sample_batches = samples self.sample_content = list() self.callbacks: listdir[BaseCallback]", "np import torch from accelerate import Accelerator from torch import nn from torch.optim", "self.kde_lambda = kde_lambda self.multimodal = isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd = MultiModalScaling() # optimizer, scheduler", "self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def predict(self, test_dataloader: DataLoader,", "= 0.0, kde_lambda: float = 0.0, train_metrics: Dict[str, Metric] = None, val_metrics: Dict[str,", "rotated ones y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) # debug", "torch from accelerate import Accelerator from torch import nn from torch.optim import Optimizer", "for batch in train_tqdm: start = time.time() self.optimizer.zero_grad() data = self.train_batch(batch=batch) loss =", "\"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def validation_epoch(self, epoch: int, val_dataloader: DataLoader) -> Any: val_tqdm", "self.sample_batches, replace=False) else: self.sample_batches = np.array([]) return train_dataloader, val_dataloader def _update_metrics(self, y_true: torch.Tensor,", "= loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we do not log 'iter' versions, as for", "scheduler self.logger = logger or EmptyLogger() # setup metrics, if any self.metrics =", "is not None and batch_index in self.sample_batches: self._store_samples(images, y_pred, y_true) # update metrics", "self.new_classes = new_classes 
self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items())) # internal state self.rank =", "self.callbacks: callback(self) except KeyboardInterrupt: LOG.info(\"[Epoch %2d] Interrupting training\", curr_epoch) break self.dispose_callbacks() return self", "{self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not the best place", "enum import Enum from posix import listdir from typing import TYPE_CHECKING, Any, Dict,", "name, str(item)) def _store_samples(self, images: torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor) -> None: for", "kde_criterion self.kdd_lambda = kdd_lambda self.kde_lambda = kde_lambda self.multimodal = isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd =", "deal with the batch size later # also, we take just the first", "self.criterion_kde = kde_criterion self.kdd_lambda = kdd_lambda self.kde_lambda = kde_lambda self.multimodal = isinstance(new_model.encoder, MultiEncoder)", "in range(images.size(0)): image = images[i].detach().cpu() true_mask = targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu() self.sample_content.append((image, true_mask,", "self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) # store results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed)", "self.accelerator.autocast(): preds, _ = self.model(x) loss = self.criterion(preds, y) # gather info images", "later if self.sample_batches is not None and batch_index in self.sample_batches: self._store_samples(images, y_pred, y_true)", "* kdd_loss y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) # store samples for visualization,", "validation_epoch(self, epoch: int, val_dataloader: DataLoader) -> Any: val_tqdm = progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not", "new_model 
self.old_model = old_model self.criterion = seg_criterion # knowledge distillation: KDD = KD", "curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not the", "= self.model(x) seg_loss = self.criterion(new_out, y) # forward and loss for KD if", "versions for loss and timings, since we do not advance the logger step", "self.callbacks: callback.dispose(self) def add_metrics(self, stage: TrainerStage, metrics: Dict[str, Metric]) -> Trainer: assert stage.value", "batch x = x.to(self.accelerator.device) y = y.to(self.accelerator.device) # forward and loss on main", "for the test set (e.g. display images) for callback in self.callbacks: callback(self) return", "= self.rank == 0 self.current_epoch = -1 self.current_loss = None self.global_step = -1", "= task self.old_classes = old_classes self.new_classes = new_classes self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items()))", "of useless) # store results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return", "# we do not log 'iter' versions, as for validation losses.append(loss_value) timings.append(elapsed) if", "import DataLoader from saticl.logging import BaseLogger from saticl.logging.empty import EmptyLogger from saticl.losses.regularization import", "# store for later classwise[metric_name] = score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") #", "self.sample_batches is not None and batch_index in self.sample_batches: images = self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1],", "{TrainerStage.train.value: dict(), TrainerStage.val.value: dict()} self.best_epoch = None self.best_score = None self.best_state_dict = None", "TrainerStage(str, Enum): train = \"train\" val = \"val\" test = \"test\" 
class Trainer:", "get_rank() self.is_main = self.rank == 0 self.current_epoch = -1 self.current_loss = None self.global_step", "in metrics\" self.metrics[stage.value] = metrics def step(self) -> None: self.global_step += 1 self.logger.step()", "_reset_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: for metric in self.metrics[stage.value].values(): metric.reset() def", "prepare model and loader, pass as val loader to store num samples _,", "(time.time() - start) # gather info loss_val = loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we", "single table if classwise: LOG.debug(\"Classwise: %s\", str(classwise)) header = list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise)", "class TrainerStage(str, Enum): train = \"train\" val = \"val\" test = \"test\" class", "get_rank if TYPE_CHECKING: from saticl.trainer.callbacks import BaseCallback LOG = get_logger(__name__) class TrainerStage(str, Enum):", "Metric], logger_exclude: Iterable[str] = None, return_preds: bool = False): logger_exclude = logger_exclude or", "= kdd_lambda self.kde_lambda = kde_lambda self.multimodal = isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd = MultiModalScaling() #", "def _prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None) -> None: self.model, self.optimizer =", "losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def predict(self, test_dataloader:", "scaler for AMP self.optimizer = optimizer self.scheduler = scheduler self.logger = logger or", "accelerator self.debug = debug self.model = new_model self.old_model = old_model self.criterion = seg_criterion", "progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main) self.model.train() for batch in train_tqdm: start = time.time()", "\"stage already 
present in metrics\" self.metrics[stage.value] = metrics def step(self) -> None: self.global_step", "self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval() for i, batch in enumerate(test_tqdm): start = time.time()", "= 0.0, train_metrics: Dict[str, Metric] = None, val_metrics: Dict[str, Metric] = None, logger:", "for i in range(images.size(0)): image = images[i].detach().cpu() true_mask = targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu()", "validation_batch(self, batch: Any, batch_index: int): # init losses and retrieve x, y x,", "place to call it, but it's best to call it every epoch instead", "= torch.tensor(0.0), torch.tensor(0.0) # forward and loss on main task, using AMP with", "self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration on callbacks for the test set", "None: log_strings = [] exclude = exclude or [] scores = self.current_scores[stage.value] classwise", "self.optimizer = self.accelerator.prepare(self.model, self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader) if val_dataloader is not None: val_dataloader", "def train_batch(self, batch: Any) -> torch.Tensor: # init losses and retrieve x, y", "-> None: result = dict() with torch.no_grad(): for name, metric in self.metrics[stage.value].items(): result[name]", "MultiEncoder from saticl.tasks import Task from saticl.utils.common import get_logger, progressbar from saticl.utils.decorators import", "seg_criterion: nn.Module, kdd_criterion: nn.Module, kde_criterion: nn.Module = None, kdd_lambda: float = 0.0, kde_lambda:", "x, y = batch seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0) # forward and loss", "as np import torch from accelerate import Accelerator from torch import nn from", "forward and loss on main task, using AMP with self.accelerator.autocast(): new_out, new_features 
=", "scalars for metric_name, score in scores.items(): if metric_name in exclude: continue if score.ndim", "= list() self.callbacks: listdir[BaseCallback] = list() def _prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader =", "self.step() return losses, timings def train_epoch_end(self, train_losses: dict, train_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train)", "v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times) for callback in self.callbacks: callback(self) except", "callback in self.callbacks: callback(self) except KeyboardInterrupt: LOG.info(\"[Epoch %2d] Interrupting training\", curr_epoch) break self.dispose_callbacks()", "if self.sample_batches is not None and self.sample_batches > 0: self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches,", "start = time.time() loss, data = self.test_batch(batch=batch, batch_index=i) elapsed = (time.time() - start)", "is not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics is not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) #", "= new_classes self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items())) # internal state self.rank = get_rank()", "enumerate(val_tqdm): start = time.time() data = self.validation_batch(batch=batch, batch_index=i) loss = data[\"tot_loss\"] elapsed =", "> 0: self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else: self.sample_batches = np.array([]) return train_dataloader,", "in enumerate(test_tqdm): start = time.time() loss, data = self.test_batch(batch=batch, batch_index=i) elapsed = (time.time()", "f\"{loss_value:.4f}\"}) # we do not log 'iter' versions, as for validation losses.append(loss_value) timings.append(elapsed)", "training info self.current_loss = loss.mean() loss_val = loss.mean().item() 
train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\",", "self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) # store samples for visualization, if present. Requires a", "torch.Tensor, y_pred: torch.Tensor, stage: TrainerStage = TrainerStage.train) -> None: with torch.no_grad(): for metric", "# forward and loss on main task, using AMP with self.accelerator.autocast(): preds, _", "\"test\", \"ICL steps require the old model for KD\" self.accelerator = accelerator self.debug", "if self.task.step > 0: old_out, _ = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) #", "forward and loss for KD if self.task.step > 0: old_out, old_features = self.old_model(x)", "TrainerStage = TrainerStage.train) -> None: for metric in self.metrics[stage.value].values(): metric.reset() def _log_metrics(self, stage:", "-> None: self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader) if val_dataloader is", "# during validation (also, it's kind of useless) # store results for name,", "= score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log the full string once", "stage=TrainerStage.test.value, disable=not self.is_main) losses, timings, results = [], [], [] # prepare model", "None: assert task.step == 0 or old_model is not None or stage ==", "__init__(self, accelerator: Accelerator, task: Task, new_model: nn.Module, old_model: nn.Module, optimizer: Optimizer, scheduler: Any,", "= self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) # store samples for visualization, if present. 
Requires", "loss for KD if self.task.step > 0: old_out, old_features = self.old_model(x) kdd_loss =", "self.accelerator.prepare(val_dataloader) # we need to do this here, because of the prepare #", "the batch size later if self.sample_batches is not None and batch_index in self.sample_batches:", "self.old_model = old_model self.criterion = seg_criterion # knowledge distillation: KDD = KD on", "self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) # sum up losses total = seg_loss +", "classwise = dict() # first log scalars for metric_name, score in scores.items(): if", "self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch: Any) -> torch.Tensor: # init losses and retrieve x,", "on decoder, KDE = KD on encoder self.criterion_kdd = kdd_criterion self.criterion_kde = kde_criterion", "= loss.mean() loss_val = loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed)", "disable=not self.is_main) losses, timings, results = [], [], [] # prepare model and", "= [], [], [] # prepare model and loader, pass as val loader", "= self.criterion(new_out, y) # this only has effect from step 1 onwards kdd_loss", "# store training info self.current_loss = loss.mean() loss_val = loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\",", "lil bit hardcoded i know if self.sample_batches is not None and batch_index in", "self.callbacks.append(callback) return self def setup_callbacks(self) -> None: for callback in self.callbacks: callback.setup(self) def", "self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else: self.sample_batches = np.array([]) return train_dataloader, val_dataloader def", "sum up losses total = seg_loss + self.kdd_lambda * kdd_loss # 
gather and", "init losses and retrieve x, y x, y = batch # forward and", "= self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) # sum up losses total = seg_loss", "of the prepare # we swap an integer of num samples with a", "for KD\" self.accelerator = accelerator self.debug = debug self.model = new_model self.old_model =", "store for later classwise[metric_name] = score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log", "init losses and retrieve x, y x, y = batch seg_loss, kdd_loss =", "None, kdd_lambda: float = 0.0, kde_lambda: float = 0.0, train_metrics: Dict[str, Metric] =", "metrics: Dict[str, Metric], logger_exclude: Iterable[str] = None, return_preds: bool = False): logger_exclude =", "pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def train_epoch(self, epoch:", "images = self.accelerator.gather(x) y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(preds) # store samples for", "[] exclude = exclude or [] scores = self.current_scores[stage.value] classwise = dict() #", "samples self.sample_content = list() self.callbacks: listdir[BaseCallback] = list() def _prepare(self, train_dataloader: DataLoader, val_dataloader:", "backward pass self.accelerator.backward(loss) self.optimizer.step() # measure elapsed time elapsed = (time.time() - start)", "from torch.optim import Optimizer from torch.utils.data import DataLoader from saticl.logging import BaseLogger from", "self.criterion = seg_criterion # knowledge distillation: KDD = KD on decoder, KDE =", "v_times) for callback in self.callbacks: callback(self) except KeyboardInterrupt: LOG.info(\"[Epoch %2d] Interrupting training\", curr_epoch)", "nn from torch.optim import Optimizer from torch.utils.data import DataLoader from saticl.logging import BaseLogger", "callback: BaseCallback) -> Trainer: 
self.callbacks.append(callback) return self def setup_callbacks(self) -> None: for callback", "task: Task, new_model: nn.Module, old_model: nn.Module, optimizer: Optimizer, scheduler: Any, old_classes: Dict[int, str],", "typing import TYPE_CHECKING, Any, Dict, Iterable import numpy as np import torch from", "y = y.to(self.accelerator.device) # forward and loss on main task, using AMP with", "self.accelerator.backward(loss) self.optimizer.step() # measure elapsed time elapsed = (time.time() - start) # store", "MultiEncoder) self.criterion_mmd = MultiModalScaling() # optimizer, scheduler and logger, scaler for AMP self.optimizer", "val_metrics: Dict[str, Metric] = None, logger: BaseLogger = None, samples: int = None,", "int = None, stage: str = \"train\", debug: bool = False) -> None:", "%s\", str(classwise)) header = list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def _debug_training(self, **kwargs: dict) ->", "not None and batch_index in self.sample_batches: self._store_samples(images, y_pred, y_true) # update metrics and", "torch.tensor(0.0) # forward and loss on main task, using AMP with self.accelerator.autocast(): new_out,", "time elapsed = (time.time() - start) # store training info self.current_loss = loss.mean()", "as val loader to store num samples _, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with", "def validation_epoch(self, epoch: int, val_dataloader: DataLoader) -> Any: val_tqdm = progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value,", "torch.Tensor, stage: TrainerStage = TrainerStage.train) -> None: with torch.no_grad(): for metric in self.metrics[stage.value].values():", "self.global_step = 0 for curr_epoch in range(max_epochs): self.current_epoch = curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try:", "data[\"tot_loss\"] elapsed = (time.time() - start) # gather info loss_val = loss.mean().item() 
val_tqdm.set_postfix({\"loss\":", "in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self,", "defaultdict from enum import Enum from posix import listdir from typing import TYPE_CHECKING,", "def _log_metrics(self, stage: TrainerStage = TrainerStage.train, exclude: Iterable[str] = None) -> None: log_strings", "self.sample_batches: self._store_samples(images, y_pred, y_true) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test)", "= -1 # internal monitoring self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()} self.best_epoch =", "TrainerStage = TrainerStage.train) -> None: with torch.no_grad(): for metric in self.metrics[stage.value].values(): metric(y_true, y_pred)", "# gather and update metrics # we group only the 'standard' images, not", "already present in metrics\" self.metrics[stage.value] = metrics def step(self) -> None: self.global_step +=", "logger self.step() return losses, timings def train_epoch_end(self, train_losses: dict, train_times: list): with torch.no_grad():", "TrainerStage = TrainerStage.train, exclude: Iterable[str] = None) -> None: log_strings = [] exclude", "None, samples: int = None, stage: str = \"train\", debug: bool = False)", "-> None: assert task.step == 0 or old_model is not None or stage", "segmentation task with self.accelerator.autocast(): new_out, _ = self.model(x) seg_loss = self.criterion(new_out, y) #", "self.logger.step() def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch: Any) -> torch.Tensor: # init losses", "logger or EmptyLogger() # setup metrics, if any self.metrics = dict() if train_metrics", 
"progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main) losses, timings, results = [], [], [] # prepare", "def setup_callbacks(self) -> None: for callback in self.callbacks: callback.setup(self) def dispose_callbacks(self) -> None:", "loggers self.task = task self.old_classes = old_classes self.new_classes = new_classes self.all_classes = OrderedDict(list(old_classes.items())", "DataLoader, val_dataloader: DataLoader = None, max_epochs: int = 100): train_dataloader, val_dataloader = self._prepare(train_dataloader,", "= isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd = MultiModalScaling() # optimizer, scheduler and logger, scaler for", "elapsed = (time.time() - start) # store training info self.current_loss = loss.mean() loss_val", "self.scheduler.step() self.train_epoch_end(t_losses, t_times) if val_dataloader is not None: self.validation_epoch_start() v_losses, v_times = self.validation_epoch(epoch=self.current_epoch,", "self.global_step) for name, item in kwargs.items(): LOG.debug(\"%8s: %s\", name, str(item)) def _store_samples(self, images:", "seg_loss, \"kdd_loss\": kdd_loss} def validation_epoch(self, epoch: int, val_dataloader: DataLoader) -> Any: val_tqdm =", "first log scalars for metric_name, score in scores.items(): if metric_name in exclude: continue", "we do not advance the logger step # during validation (also, it's kind", "\"train\", debug: bool = False) -> None: assert task.step == 0 or old_model", "Metric] = None, logger: BaseLogger = None, samples: int = None, stage: str", "int): # init losses and retrieve x, y x, y = batch seg_loss,", "name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return losses, timings def validation_epoch_end(self, val_losses: list,", "metrics, if any self.metrics = dict() if train_metrics is not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics)", "import get_logger, progressbar from saticl.utils.decorators import 
get_rank if TYPE_CHECKING: from saticl.trainer.callbacks import BaseCallback", "in self.callbacks: callback.setup(self) def dispose_callbacks(self) -> None: for callback in self.callbacks: callback.dispose(self) def", "batch: Any) -> torch.Tensor: # init losses and retrieve x, y x, y", "for callback in self.callbacks: callback.setup(self) def dispose_callbacks(self) -> None: for callback in self.callbacks:", "in range(max_epochs): self.current_epoch = curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses, t_times = self.train_epoch(epoch=self.current_epoch,", "(time.time() - start) loss_value = loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we do not log", "import EmptyLogger from saticl.losses.regularization import MultiModalScaling from saticl.metrics import Metric from saticl.models.encoders import", "Dict, Iterable import numpy as np import torch from accelerate import Accelerator from", "x.to(self.accelerator.device) y = y.to(self.accelerator.device) # forward and loss on main task, using AMP", "seg_loss = self.criterion(new_out, y) # this only has effect from step 1 onwards", "return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def", "log 'iter' versions for loss and timings, since we do not advance the", "an integer of num samples with a list of indices with same length", "once completed LOG.info(\", \".join(log_strings)) # then log class-wise results in a single table", "self.model.state_dict() self.setup_callbacks() self.global_step = 0 for curr_epoch in range(max_epochs): self.current_epoch = curr_epoch LOG.info(f\"[Epoch", "= [] losses = defaultdict(list) with torch.no_grad(): self.model.eval() for i, batch in enumerate(val_tqdm):", "val = \"val\" test = \"test\" class Trainer: def __init__(self, accelerator: Accelerator, task:", "a list of indices with same length if 
self.sample_batches is not None and", "dict() if train_metrics is not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics is not None:", "self.current_epoch = -1 self.current_loss = None self.global_step = -1 # internal monitoring self.current_scores", "-> None: log_strings = [] exclude = exclude or [] scores = self.current_scores[stage.value]", "self.task = task self.old_classes = old_classes self.new_classes = new_classes self.all_classes = OrderedDict(list(old_classes.items()) +", "from torch.utils.data import DataLoader from saticl.logging import BaseLogger from saticl.logging.empty import EmptyLogger from", "update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\": total, \"seg_loss\": seg_loss,", "every epoch instead of iteration self.scheduler.step() self.train_epoch_end(t_losses, t_times) if val_dataloader is not None:", "time.time() self.optimizer.zero_grad() data = self.train_batch(batch=batch) loss = data[\"tot_loss\"] # backward pass self.accelerator.backward(loss) self.optimizer.step()", "= KD on decoder, KDE = KD on encoder self.criterion_kdd = kdd_criterion self.criterion_kde", "return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def predict(self, test_dataloader: DataLoader, metrics: Dict[str, Metric],", "saticl.metrics import Metric from saticl.models.encoders import MultiEncoder from saticl.tasks import Task from saticl.utils.common", "val_dataloader = self._prepare(train_dataloader, val_dataloader) self.best_state_dict = self.model.state_dict() self.setup_callbacks() self.global_step = 0 for curr_epoch", "samples for visualization, if present. 
Requires a plot callback # better to unpack", "= None, logger: BaseLogger = None, samples: int = None, stage: str =", "Iterable[str] = None) -> None: log_strings = [] exclude = exclude or []", "not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL information for loggers self.task = task self.old_classes", "= list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def _debug_training(self, **kwargs: dict) -> None: LOG.debug(\"[Epoch %2d]", "if active if self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\": total, \"seg_loss\":", "setup metrics, if any self.metrics = dict() if train_metrics is not None: self.add_metrics(stage=TrainerStage.train,", "= self.accelerator.gather(preds) # store samples for visualization, if present. Requires a plot callback", "_compute_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: result = dict() with torch.no_grad(): for", "self.validation_epoch_end(v_losses, v_times) for callback in self.callbacks: callback(self) except KeyboardInterrupt: LOG.info(\"[Epoch %2d] Interrupting training\",", "self.accelerator.prepare(self.model, self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader) if val_dataloader is not None: val_dataloader = self.accelerator.prepare(val_dataloader)", "torch.Tensor: # init losses and retrieve x, y x, y = batch #", "iteration: %d\", self.current_epoch, self.global_step) for name, item in kwargs.items(): LOG.debug(\"%8s: %s\", name, str(item))", "val_dataloader: DataLoader) -> Any: val_tqdm = progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main) timings =", "self.current_scores[stage.value] classwise = dict() # first log scalars for metric_name, score in scores.items():", "kde_lambda: float = 0.0, train_metrics: Dict[str, Metric] = None, val_metrics: Dict[str, Metric] =", 
"annotations import time from collections import OrderedDict, defaultdict from enum import Enum from", "results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration on callbacks for", "with torch.no_grad(): for metric in self.metrics[stage.value].values(): metric(y_true, y_pred) def _compute_metrics(self, stage: TrainerStage =", "TrainerStage.train) -> None: result = dict() with torch.no_grad(): for name, metric in self.metrics[stage.value].items():", "return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def validation_epoch(self, epoch: int, val_dataloader: DataLoader)", "val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) return losses, timings def validation_epoch_end(self, val_losses: list, val_times:", "in self.metrics[stage.value].values(): metric.reset() def _log_metrics(self, stage: TrainerStage = TrainerStage.train, exclude: Iterable[str] = None)", "or old_model is not None or stage == \"test\", \"ICL steps require the", "def train_epoch(self, epoch: int, train_dataloader: DataLoader) -> Any: timings = [] losses =", "in enumerate(val_tqdm): start = time.time() data = self.validation_batch(batch=batch, batch_index=i) loss = data[\"tot_loss\"] elapsed", "size later if self.sample_batches is not None and batch_index in self.sample_batches: self._store_samples(images, y_pred,", "step the logger self.step() return losses, timings def train_epoch_end(self, train_losses: dict, train_times: list):", "in exclude: continue if score.ndim > 0: # store for later classwise[metric_name] =", "assert stage.value not in self.metrics, \"stage already present in metrics\" self.metrics[stage.value] = metrics", "False): logger_exclude = logger_exclude or [] self.metrics[TrainerStage.test.value] = metrics 
self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader,", "self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def validation_epoch(self, epoch:", "self def setup_callbacks(self) -> None: for callback in self.callbacks: callback.setup(self) def dispose_callbacks(self) ->", "log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log the full string once completed LOG.info(\", \".join(log_strings)) # then", "optimizer self.scheduler = scheduler self.logger = logger or EmptyLogger() # setup metrics, if", "or EmptyLogger() # setup metrics, if any self.metrics = dict() if train_metrics is", "f\"{loss_val:.4f}\"}) # we do not log 'iter' versions for loss and timings, since", "the rotated ones y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) #", "train_dataloader: DataLoader) -> Any: timings = [] losses = defaultdict(list) train_tqdm = progressbar(train_dataloader,", "task self.old_classes = old_classes self.new_classes = new_classes self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items())) #", "TrainerStage.train) -> None: for metric in self.metrics[stage.value].values(): metric.reset() def _log_metrics(self, stage: TrainerStage =", "metrics def step(self) -> None: self.global_step += 1 self.logger.step() def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def", "[], [] # prepare model and loader, pass as val loader to store", "Dict[int, str], seg_criterion: nn.Module, kdd_criterion: nn.Module, kde_criterion: nn.Module = None, kdd_lambda: float =", "self.model(x) seg_loss = self.criterion(new_out, y) # this only has effect from step 1", "self.task.step > 0: old_out, _ = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) # sum", "test_dataloader = 
self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval() for i, batch in enumerate(test_tqdm): start", "require the old model for KD\" self.accelerator = accelerator self.debug = debug self.model", "0.0, train_metrics: Dict[str, Metric] = None, val_metrics: Dict[str, Metric] = None, logger: BaseLogger", "torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if self.task.step > 0: old_out, _ = self.old_model(x) kdd_loss =", "and timings, since we do not advance the logger step # during validation", "= self.train_batch(batch=batch) loss = data[\"tot_loss\"] # backward pass self.accelerator.backward(loss) self.optimizer.step() # measure elapsed", "pred_mask)) def add_callback(self, callback: BaseCallback) -> Trainer: self.callbacks.append(callback) return self def setup_callbacks(self) ->", "for i, batch in enumerate(test_tqdm): start = time.time() loss, data = self.test_batch(batch=batch, batch_index=i)", "KD if self.task.step > 0: old_out, old_features = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out)", "results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) # step the logger self.step()", "defaultdict(list) train_tqdm = progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main) self.model.train() for batch in train_tqdm:", "= self.accelerator.gather(new_out) # store samples for visualization, if present. Requires a plot callback", "= self.accelerator.gather(y) y_pred = self.accelerator.gather(preds) # store samples for visualization, if present. 
Requires", "image = images[i].detach().cpu() true_mask = targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask)) def", "== \"test\", \"ICL steps require the old model for KD\" self.accelerator = accelerator", "Dict[str, Metric], logger_exclude: Iterable[str] = None, return_preds: bool = False): logger_exclude = logger_exclude", "loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) # store results for", "= TrainerStage.train) -> None: with torch.no_grad(): for metric in self.metrics[stage.value].values(): metric(y_true, y_pred) def", "losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def validation_epoch(self,", "result = dict() with torch.no_grad(): for name, metric in self.metrics[stage.value].items(): result[name] = metric.compute()", "train_tqdm: start = time.time() self.optimizer.zero_grad() data = self.train_batch(batch=batch) loss = data[\"tot_loss\"] # backward", "import nn from torch.optim import Optimizer from torch.utils.data import DataLoader from saticl.logging import", "_update_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor, stage: TrainerStage = TrainerStage.train) -> None: with torch.no_grad():", "# this only has effect from step 1 onwards kdd_loss = torch.tensor(0, device=seg_loss.device,", "time.time() loss, data = self.test_batch(batch=batch, batch_index=i) elapsed = (time.time() - start) loss_value =", "self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) # debug if active if self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype,", "int, val_dataloader: DataLoader) -> Any: val_tqdm = 
progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main) timings", "validation (also, it's kind of useless) # store results for name, val in", "ICL information for loggers self.task = task self.old_classes = old_classes self.new_classes = new_classes", "return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def train_epoch(self, epoch: int, train_dataloader: DataLoader)", "with torch.no_grad(): for name, metric in self.metrics[stage.value].items(): result[name] = metric.compute() self.current_scores[stage.value] = result", "the batch size later # also, we take just the first one, a", "Iterable[str] = None, return_preds: bool = False): logger_exclude = logger_exclude or [] self.metrics[TrainerStage.test.value]", "bool = False) -> None: assert task.step == 0 or old_model is not", "hardcoded i know if self.sample_batches is not None and batch_index in self.sample_batches: images", "since we do not advance the logger step # during validation (also, it's", "self.validation_batch(batch=batch, batch_index=i) loss = data[\"tot_loss\"] elapsed = (time.time() - start) # gather info", "saticl.models.encoders import MultiEncoder from saticl.tasks import Task from saticl.utils.common import get_logger, progressbar from", "self.best_state_dict = self.model.state_dict() self.setup_callbacks() self.global_step = 0 for curr_epoch in range(max_epochs): self.current_epoch =", "[] self.metrics[TrainerStage.test.value] = metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main) losses, timings,", "import OrderedDict, defaultdict from enum import Enum from posix import listdir from typing", "self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch: Any, batch_index: int): # init losses and retrieve", "y_true.cpu(), torch.argmax(y_pred, dim=1).cpu()) def predict(self, test_dataloader: 
DataLoader, metrics: Dict[str, Metric], logger_exclude: Iterable[str] =", "self.sample_batches is not None and batch_index in self.sample_batches: self._store_samples(images, y_pred, y_true) # update", "best place to call it, but it's best to call it every epoch", "self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not the best place to call it, but it's best", "epoch: int, val_dataloader: DataLoader) -> Any: val_tqdm = progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main)", "epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main) timings = [] losses = defaultdict(list) with torch.no_grad(): self.model.eval()", "self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None, max_epochs: int = 100):", "update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test) return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred,", "Any, old_classes: Dict[int, str], new_classes: Dict[int, str], seg_criterion: nn.Module, kdd_criterion: nn.Module, kde_criterion: nn.Module", "name, values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val)", "metric in self.metrics[stage.value].items(): result[name] = metric.compute() self.current_scores[stage.value] = result def _reset_metrics(self, stage: TrainerStage", "do not advance the logger step # during validation (also, it's kind of", "main task, using AMP with self.accelerator.autocast(): new_out, new_features = self.model(x) seg_loss = self.criterion(new_out,", "= {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()} self.best_epoch = None self.best_score = None self.best_state_dict =", "= 
None self.global_step = -1 # internal monitoring self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value:", "= \"train\" val = \"val\" test = \"test\" class Trainer: def __init__(self, accelerator:", "0: self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else: self.sample_batches = np.array([]) return train_dataloader, val_dataloader", "0: old_out, _ = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) # sum up losses", "100): train_dataloader, val_dataloader = self._prepare(train_dataloader, val_dataloader) self.best_state_dict = self.model.state_dict() self.setup_callbacks() self.global_step = 0", "indices with same length if self.sample_batches is not None and self.sample_batches > 0:", "stage: TrainerStage = TrainerStage.train) -> None: for metric in self.metrics[stage.value].values(): metric.reset() def _log_metrics(self,", "from torch import nn from torch.optim import Optimizer from torch.utils.data import DataLoader from", "logger_exclude = logger_exclude or [] self.metrics[TrainerStage.test.value] = metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value,", "task, using AMP with self.accelerator.autocast(): preds, _ = self.model(x) loss = self.criterion(preds, y)", "\"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def train_epoch(self, epoch: int, train_dataloader: DataLoader) -> Any: timings", "pass self.accelerator.backward(loss) self.optimizer.step() # measure elapsed time elapsed = (time.time() - start) #", "= (time.time() - start) # store training info self.current_loss = loss.mean() loss_val =", "self.current_scores[stage.value] = result def _reset_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: for metric", "new_features = self.model(x) seg_loss = self.criterion(new_out, y) # forward and loss for KD", "losses = defaultdict(list) with torch.no_grad(): self.model.eval() for i, batch in 
enumerate(val_tqdm): start =", "listdir from typing import TYPE_CHECKING, Any, Dict, Iterable import numpy as np import", "y.to(self.accelerator.device) # forward and loss on main task, using AMP with self.accelerator.autocast(): preds,", "test = \"test\" class Trainer: def __init__(self, accelerator: Accelerator, task: Task, new_model: nn.Module,", "a plot callback # better to unpack now, so that we don't have", "= logger or EmptyLogger() # setup metrics, if any self.metrics = dict() if", "saticl.utils.common import get_logger, progressbar from saticl.utils.decorators import get_rank if TYPE_CHECKING: from saticl.trainer.callbacks import", "for metric_name, score in scores.items(): if metric_name in exclude: continue if score.ndim >", "# sum up losses total = seg_loss + self.kdd_lambda * kdd_loss # gather", "str = \"train\", debug: bool = False) -> None: assert task.step == 0", "training\", curr_epoch) break self.dispose_callbacks() return self def test_batch(self, batch: Any, batch_index: int): x,", "x, y x, y = batch # forward and loss on segmentation task", "self._compute_metrics(stage=TrainerStage.val) for name, values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def fit(self,", "if score.ndim > 0: # store for later classwise[metric_name] = score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\",", "> 0: # store for later classwise[metric_name] = score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}:", "y_pred = self.accelerator.gather(new_out) # store samples for visualization, if present. 
Requires a plot", "Interrupting training\", curr_epoch) break self.dispose_callbacks() return self def test_batch(self, batch: Any, batch_index: int):", "Accelerator from torch import nn from torch.optim import Optimizer from torch.utils.data import DataLoader", "= self.accelerator.prepare(self.model, self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader) if val_dataloader is not None: val_dataloader =", "new_classes: Dict[int, str], seg_criterion: nn.Module, kdd_criterion: nn.Module, kde_criterion: nn.Module = None, kdd_lambda: float", "class Trainer: def __init__(self, accelerator: Accelerator, task: Task, new_model: nn.Module, old_model: nn.Module, optimizer:", "metric(y_true, y_pred) def _compute_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: result = dict()", "batch in train_tqdm: start = time.time() self.optimizer.zero_grad() data = self.train_batch(batch=batch) loss = data[\"tot_loss\"]", "def _debug_training(self, **kwargs: dict) -> None: LOG.debug(\"[Epoch %2d] - iteration: %d\", self.current_epoch, self.global_step)", "only has effect from step 1 onwards kdd_loss = torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if", "here, because of the prepare # we swap an integer of num samples", "metric_name in exclude: continue if score.ndim > 0: # store for later classwise[metric_name]", "= np.array([]) return train_dataloader, val_dataloader def _update_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor, stage: TrainerStage", "\"kdd_loss\": kdd_loss} def train_epoch(self, epoch: int, train_dataloader: DataLoader) -> Any: timings = []", "return losses, timings def train_epoch_end(self, train_losses: dict, train_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for", "= metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main) losses, timings, results =", "TrainerStage.train) -> 
None: with torch.no_grad(): for metric in self.metrics[stage.value].values(): metric(y_true, y_pred) def _compute_metrics(self,", "= old_model self.criterion = seg_criterion # knowledge distillation: KDD = KD on decoder,", "during validation (also, it's kind of useless) # store results for name, val", "new_model: nn.Module, old_model: nn.Module, optimizer: Optimizer, scheduler: Any, old_classes: Dict[int, str], new_classes: Dict[int,", "classwise: LOG.debug(\"Classwise: %s\", str(classwise)) header = list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def _debug_training(self, **kwargs:", "self.best_state_dict = None self.sample_batches = samples self.sample_content = list() self.callbacks: listdir[BaseCallback] = list()", "(time.time() - start) # store training info self.current_loss = loss.mean() loss_val = loss.mean().item()", "int = 100): train_dataloader, val_dataloader = self._prepare(train_dataloader, val_dataloader) self.best_state_dict = self.model.state_dict() self.setup_callbacks() self.global_step", "elapsed = (time.time() - start) loss_value = loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we do", "scheduler and logger, scaler for AMP self.optimizer = optimizer self.scheduler = scheduler self.logger", "one, a lil bit hardcoded i know if self.sample_batches is not None and", "with self.accelerator.autocast(): new_out, new_features = self.model(x) seg_loss = self.criterion(new_out, y) # forward and", "scores = self.current_scores[stage.value] classwise = dict() # first log scalars for metric_name, score", "not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics is not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL", "self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1]) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, 
stage=TrainerStage.val)", "torch.no_grad(): for name, metric in self.metrics[stage.value].items(): result[name] = metric.compute() self.current_scores[stage.value] = result def", "x, y = batch # forward and loss on segmentation task with self.accelerator.autocast():", "import torch from accelerate import Accelerator from torch import nn from torch.optim import", "max_epochs: int = 100): train_dataloader, val_dataloader = self._prepare(train_dataloader, val_dataloader) self.best_state_dict = self.model.state_dict() self.setup_callbacks()", "-> None: for callback in self.callbacks: callback.dispose(self) def add_metrics(self, stage: TrainerStage, metrics: Dict[str,", "{\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss} def validation_epoch(self, epoch: int, val_dataloader: DataLoader) ->", "# then log class-wise results in a single table if classwise: LOG.debug(\"Classwise: %s\",", "DataLoader) -> Any: timings = [] losses = defaultdict(list) train_tqdm = progressbar(train_dataloader, epoch=epoch,", "TrainerStage.train, exclude: Iterable[str] = None) -> None: log_strings = [] exclude = exclude", "= get_logger(__name__) class TrainerStage(str, Enum): train = \"train\" val = \"val\" test =", "= kde_lambda self.multimodal = isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd = MultiModalScaling() # optimizer, scheduler and", "pass as val loader to store num samples _, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader)", "nn.Module, optimizer: Optimizer, scheduler: Any, old_classes: Dict[int, str], new_classes: Dict[int, str], seg_criterion: nn.Module,", "from step 1 onwards kdd_loss = torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if self.task.step > 0:", "self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\": kdd_loss}", "kdd_lambda: float = 0.0, kde_lambda: float = 
0.0, train_metrics: Dict[str, Metric] = None,", "val_dataloader is not None: self.validation_epoch_start() v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times) for", "self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch: Any, batch_index: int):", "Trainer: def __init__(self, accelerator: Accelerator, task: Task, new_model: nn.Module, old_model: nn.Module, optimizer: Optimizer,", "return_preds: bool = False): logger_exclude = logger_exclude or [] self.metrics[TrainerStage.test.value] = metrics self._reset_metrics(stage=TrainerStage.test)", "Requires a plot callback # better to unpack now, so that we don't", "= self.accelerator.prepare(train_dataloader) if val_dataloader is not None: val_dataloader = self.accelerator.prepare(val_dataloader) # we need", "losses, timings def validation_epoch_end(self, val_losses: list, val_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.val) for name,", "log scalars for metric_name, score in scores.items(): if metric_name in exclude: continue if", "item in kwargs.items(): LOG.debug(\"%8s: %s\", name, str(item)) def _store_samples(self, images: torch.Tensor, outputs: torch.Tensor,", "BaseCallback LOG = get_logger(__name__) class TrainerStage(str, Enum): train = \"train\" val = \"val\"", "knowledge distillation: KDD = KD on decoder, KDE = KD on encoder self.criterion_kdd", "retrieve x, y x, y = batch # forward and loss on segmentation", "# iteration on callbacks for the test set (e.g. 
display images) for callback", "metrics=val_metrics) # ICL information for loggers self.task = task self.old_classes = old_classes self.new_classes", "in self.metrics[stage.value].items(): result[name] = metric.compute() self.current_scores[stage.value] = result def _reset_metrics(self, stage: TrainerStage =", "test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main) losses, timings, results = [], [], []", "y = batch x = x.to(self.accelerator.device) y = y.to(self.accelerator.device) # forward and loss", "log the full string once completed LOG.info(\", \".join(log_strings)) # then log class-wise results", "0 or old_model is not None or stage == \"test\", \"ICL steps require", "for name, values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear()", "# debug if active if self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\":", "not the rotated ones y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train)", "DataLoader = None) -> None: self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) train_dataloader = self.accelerator.prepare(train_dataloader)", "1 self.logger.step() def train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch: Any) -> torch.Tensor: # init", "= defaultdict(list) with torch.no_grad(): self.model.eval() for i, batch in enumerate(val_tqdm): start = time.time()", "self.accelerator.gather(y) y_pred = self.accelerator.gather(preds) # store samples for visualization, if present. 
Requires a", "steps require the old model for KD\" self.accelerator = accelerator self.debug = debug", "torch.optim import Optimizer from torch.utils.data import DataLoader from saticl.logging import BaseLogger from saticl.logging.empty", "to do this here, because of the prepare # we swap an integer", "timings, since we do not advance the logger step # during validation (also,", "kdd_loss y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) # store samples for visualization, if", "stage: TrainerStage = TrainerStage.train) -> None: with torch.no_grad(): for metric in self.metrics[stage.value].values(): metric(y_true,", "val_dataloader) self.best_state_dict = self.model.state_dict() self.setup_callbacks() self.global_step = 0 for curr_epoch in range(max_epochs): self.current_epoch", "nn.Module = None, kdd_lambda: float = 0.0, kde_lambda: float = 0.0, train_metrics: Dict[str,", "nn.Module, kdd_criterion: nn.Module, kde_criterion: nn.Module = None, kdd_lambda: float = 0.0, kde_lambda: float", "i, batch in enumerate(val_tqdm): start = time.time() data = self.validation_batch(batch=batch, batch_index=i) loss =", "also, we take just the first one, a lil bit hardcoded i know", "for loss and timings, since we do not advance the logger step #", "self.global_step = -1 # internal monitoring self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()} self.best_epoch", "self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration on callbacks for the test set (e.g. 
display images)", "batch_index in self.sample_batches: self._store_samples(images, y_pred, y_true) # update metrics and return losses self._update_metrics(y_true=y_true,", "OrderedDict, defaultdict from enum import Enum from posix import listdir from typing import", "= None, samples: int = None, stage: str = \"train\", debug: bool =", "KD on decoder, KDE = KD on encoder self.criterion_kdd = kdd_criterion self.criterion_kde =", "kdd_loss # gather and update metrics # we group only the 'standard' images,", "start) # gather info loss_val = loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we do not", "y x, y = batch # forward and loss on segmentation task with", "y x, y = batch seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0) # forward and", "loss_val = loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) # store", "we swap an integer of num samples with a list of indices with", "train_epoch(self, epoch: int, train_dataloader: DataLoader) -> Any: timings = [] losses = defaultdict(list)", "# backward pass self.accelerator.backward(loss) self.optimizer.step() # measure elapsed time elapsed = (time.time() -", "float = 0.0, train_metrics: Dict[str, Metric] = None, val_metrics: Dict[str, Metric] = None,", "torch.tensor(0.0), torch.tensor(0.0) # forward and loss on main task, using AMP with self.accelerator.autocast():", "_, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval() for i, batch in enumerate(test_tqdm):", "None and batch_index in self.sample_batches: self._store_samples(images, y_pred, y_true) # update metrics and return", "# step the logger self.step() return losses, timings def train_epoch_end(self, train_losses: dict, train_times:", "score continue 
self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log the full string once completed", "0: old_out, old_features = self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) total = seg_loss +", "name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) # step the logger self.step() return losses,", "{score:.4f}\") # log the full string once completed LOG.info(\", \".join(log_strings)) # then log", "losses, timings, results = [], [], [] # prepare model and loader, pass", "= time.time() loss, data = self.test_batch(batch=batch, batch_index=i) elapsed = (time.time() - start) loss_value", "exclude=logger_exclude) # iteration on callbacks for the test set (e.g. display images) for", "forward and loss on segmentation task with self.accelerator.autocast(): new_out, _ = self.model(x) seg_loss", "# we need to do this here, because of the prepare # we", "= TrainerStage.train, exclude: Iterable[str] = None) -> None: log_strings = [] exclude =", "in self.sample_batches: self._store_samples(images, y_pred, y_true) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred,", "batch_index in self.sample_batches: images = self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1]) # update metrics and", "train_epoch_end(self, train_losses: dict, train_times: list): with torch.no_grad(): self._compute_metrics(stage=TrainerStage.train) for name, values in train_losses.items():", "_ = self.model(x) loss = self.criterion(preds, y) # gather info images = self.accelerator.gather(x)", "\"test\" class Trainer: def __init__(self, accelerator: Accelerator, task: Task, new_model: nn.Module, old_model: nn.Module,", "f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) # 
store results for name, val", "-> None: for callback in self.callbacks: callback.setup(self) def dispose_callbacks(self) -> None: for callback", "self.model = new_model self.old_model = old_model self.criterion = seg_criterion # knowledge distillation: KDD", "just the first one, a lil bit hardcoded i know if self.sample_batches is", "from saticl.models.encoders import MultiEncoder from saticl.tasks import Task from saticl.utils.common import get_logger, progressbar", "torch.argmax(y_pred, dim=1).cpu()) def predict(self, test_dataloader: DataLoader, metrics: Dict[str, Metric], logger_exclude: Iterable[str] = None,", "y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train) # debug if active", "unpack now, so that we don't have to deal with the batch size", "step # during validation (also, it's kind of useless) # store results for", "with self.accelerator.autocast(): preds, _ = self.model(x) loss = self.criterion(preds, y) # gather info", "prepare # we swap an integer of num samples with a list of", "batch_index: int): # init losses and retrieve x, y x, y = batch", "store results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) # step the logger", "None or stage == \"test\", \"ICL steps require the old model for KD\"", "batch size later # also, we take just the first one, a lil", "or stage == \"test\", \"ICL steps require the old model for KD\" self.accelerator", "**kwargs: dict) -> None: LOG.debug(\"[Epoch %2d] - iteration: %d\", self.current_epoch, self.global_step) for name,", "= False) -> None: assert task.step == 0 or old_model is not None", "it's kind of useless) # store results for name, val in data.items(): losses[name].append(val.mean().item())", "of num samples with a list of indices with same length if self.sample_batches", "if classwise: LOG.debug(\"Classwise: %s\", str(classwise)) header = 
list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\", headers=header, results=classwise) def _debug_training(self,", "loss = data[\"tot_loss\"] elapsed = (time.time() - start) # gather info loss_val =", "loss on segmentation task with self.accelerator.autocast(): new_out, _ = self.model(x) seg_loss = self.criterion(new_out,", "metric in self.metrics[stage.value].values(): metric(y_true, y_pred) def _compute_metrics(self, stage: TrainerStage = TrainerStage.train) -> None:", "if metric_name in exclude: continue if score.ndim > 0: # store for later", "DataLoader) -> Any: val_tqdm = progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main) timings = []", "= curr_epoch LOG.info(f\"[Epoch {self.current_epoch:>2d}]\") try: self.train_epoch_start() t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not", "# gather info loss_val = loss.mean().item() val_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) # we do not log", "of indices with same length if self.sample_batches is not None and self.sample_batches >", "we need to do this here, because of the prepare # we swap", "def _reset_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: for metric in self.metrics[stage.value].values(): metric.reset()", "[] scores = self.current_scores[stage.value] classwise = dict() # first log scalars for metric_name,", "losses = defaultdict(list) train_tqdm = progressbar(train_dataloader, epoch=epoch, stage=TrainerStage.train.value, disable=not self.is_main) self.model.train() for batch", "from saticl.metrics import Metric from saticl.models.encoders import MultiEncoder from saticl.tasks import Task from", "old_model self.criterion = seg_criterion # knowledge distillation: KDD = KD on decoder, KDE", "y_pred) def _compute_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: result = dict() with", "bool = False): logger_exclude = logger_exclude or [] 
self.metrics[TrainerStage.test.value] = metrics self._reset_metrics(stage=TrainerStage.test) test_tqdm", "optimizer: Optimizer, scheduler: Any, old_classes: Dict[int, str], new_classes: Dict[int, str], seg_criterion: nn.Module, kdd_criterion:", "# store results for name, val in data.items(): losses[name].append(val.mean().item()) timings.append(elapsed) # step the", "and loss on main task, using AMP with self.accelerator.autocast(): preds, _ = self.model(x)", "saticl.logging import BaseLogger from saticl.logging.empty import EmptyLogger from saticl.losses.regularization import MultiModalScaling from saticl.metrics", "and loss on segmentation task with self.accelerator.autocast(): new_out, _ = self.model(x) seg_loss =", "self.train_batch(batch=batch) loss = data[\"tot_loss\"] # backward pass self.accelerator.backward(loss) self.optimizer.step() # measure elapsed time", "for name, metric in self.metrics[stage.value].items(): result[name] = metric.compute() self.current_scores[stage.value] = result def _reset_metrics(self,", "AMP with self.accelerator.autocast(): new_out, new_features = self.model(x) seg_loss = self.criterion(new_out, y) # forward", "def fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None, max_epochs: int = 100): train_dataloader,", "<reponame>edornd/multimodal-icl from __future__ import annotations import time from collections import OrderedDict, defaultdict from", "validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def validation_batch(self, batch: Any, batch_index: int): # init losses and", "DataLoader, val_dataloader: DataLoader = None) -> None: self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) train_dataloader", "train_dataloader = self.accelerator.prepare(train_dataloader) if val_dataloader is not None: val_dataloader = self.accelerator.prepare(val_dataloader) # we", "val_dataloader: DataLoader = None, max_epochs: int = 100): 
train_dataloader, val_dataloader = self._prepare(train_dataloader, val_dataloader)", "-> None: with torch.no_grad(): for metric in self.metrics[stage.value].values(): metric(y_true, y_pred) def _compute_metrics(self, stage:", "seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0) # forward and loss on main task, using", "train_epoch_start(self): self._reset_metrics(stage=TrainerStage.train) def train_batch(self, batch: Any) -> torch.Tensor: # init losses and retrieve", "batch # forward and loss on segmentation task with self.accelerator.autocast(): new_out, _ =", "Metric] = None, val_metrics: Dict[str, Metric] = None, logger: BaseLogger = None, samples:", "store samples for visualization, if present. Requires a plot callback # better to", "EmptyLogger from saticl.losses.regularization import MultiModalScaling from saticl.metrics import Metric from saticl.models.encoders import MultiEncoder", "self.is_main = self.rank == 0 self.current_epoch = -1 self.current_loss = None self.global_step =", "Any: val_tqdm = progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main) timings = [] losses =", "= None, kdd_lambda: float = 0.0, kde_lambda: float = 0.0, train_metrics: Dict[str, Metric]", "= self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval() for i, batch in enumerate(test_tqdm): start =", "self.callbacks: listdir[BaseCallback] = list() def _prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None) ->", "that we don't have to deal with the batch size later # also,", "now, so that we don't have to deal with the batch size later", "not None or stage == \"test\", \"ICL steps require the old model for", "we don't have to deal with the batch size later # also, we", "self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics is not None: self.add_metrics(stage=TrainerStage.val, metrics=val_metrics) # ICL information for", "then log 
class-wise results in a single table if classwise: LOG.debug(\"Classwise: %s\", str(classwise))", "BaseCallback) -> Trainer: self.callbacks.append(callback) return self def setup_callbacks(self) -> None: for callback in", "self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None, max_epochs: int", "time from collections import OrderedDict, defaultdict from enum import Enum from posix import", "import MultiEncoder from saticl.tasks import Task from saticl.utils.common import get_logger, progressbar from saticl.utils.decorators", "saticl.logging.empty import EmptyLogger from saticl.losses.regularization import MultiModalScaling from saticl.metrics import Metric from saticl.models.encoders", "self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None,", "with a list of indices with same length if self.sample_batches is not None", "losses.append(loss_value) timings.append(elapsed) if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses)) self.logger.log_scalar(\"test/time\", np.mean(timings)) self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) #", "gather info images = self.accelerator.gather(x) y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(preds) # store", "# init losses and retrieve x, y x, y = batch seg_loss, kdd_loss", "the prepare # we swap an integer of num samples with a list", "= batch # forward and loss on segmentation task with self.accelerator.autocast(): new_out, _", "do not log 'iter' versions for loss and timings, since we do not", "assert task.step == 0 or old_model is not None or stage == \"test\",", "# better to unpack now, so that we don't have 
to deal with", "list of indices with same length if self.sample_batches is not None and self.sample_batches", "old_classes self.new_classes = new_classes self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items())) # internal state self.rank", "str], seg_criterion: nn.Module, kdd_criterion: nn.Module, kde_criterion: nn.Module = None, kdd_lambda: float = 0.0,", "samples _, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad(): self.model.eval() for i, batch in", "val_tqdm = progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main) timings = [] losses = defaultdict(list)", "kdd_lambda self.kde_lambda = kde_lambda self.multimodal = isinstance(new_model.encoder, MultiEncoder) self.criterion_mmd = MultiModalScaling() # optimizer,", "decoder, KDE = KD on encoder self.criterion_kdd = kdd_criterion self.criterion_kde = kde_criterion self.kdd_lambda", "metric.compute() self.current_scores[stage.value] = result def _reset_metrics(self, stage: TrainerStage = TrainerStage.train) -> None: for", "for visualization, if present. 
Requires a plot callback # better to unpack now,", "from saticl.utils.decorators import get_rank if TYPE_CHECKING: from saticl.trainer.callbacks import BaseCallback LOG = get_logger(__name__)", "self.kdd_lambda * kdd_loss # gather and update metrics # we group only the", "in a single table if classwise: LOG.debug(\"Classwise: %s\", str(classwise)) header = list(self.all_classes.values()) self.logger.log_results(f\"{stage.value}/results\",", "1 onwards kdd_loss = torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype) if self.task.step > 0: old_out, _", "x = x.to(self.accelerator.device) y = y.to(self.accelerator.device) # forward and loss on main task,", "self.kdd_lambda * kdd_loss y_true = self.accelerator.gather(y) y_pred = self.accelerator.gather(new_out) # store samples for", "score in scores.items(): if metric_name in exclude: continue if score.ndim > 0: #", "new_out, _ = self.model(x) seg_loss = self.criterion(new_out, y) # this only has effect", "= targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask)) def add_callback(self, callback: BaseCallback) ->", "list(new_classes.items())) # internal state self.rank = get_rank() self.is_main = self.rank == 0 self.current_epoch", "loss_value = loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we do not log 'iter' versions, as", "not None: self.validation_epoch_start() v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader) self.validation_epoch_end(v_losses, v_times) for callback in", "don't have to deal with the batch size later if self.sample_batches is not", "on main task, using AMP with self.accelerator.autocast(): preds, _ = self.model(x) loss =", "active if self.debug: self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss) return {\"tot_loss\": total, \"seg_loss\": seg_loss,", "and batch_index in self.sample_batches: images = 
self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1]) # update metrics", "Any, Dict, Iterable import numpy as np import torch from accelerate import Accelerator", "using AMP with self.accelerator.autocast(): new_out, new_features = self.model(x) seg_loss = self.criterion(new_out, y) #", "= batch x = x.to(self.accelerator.device) y = y.to(self.accelerator.device) # forward and loss on", "targets[i].detach().cpu() pred_mask = outputs[i].detach().cpu() self.sample_content.append((image, true_mask, pred_mask)) def add_callback(self, callback: BaseCallback) -> Trainer:", "later classwise[metric_name] = score continue self.logger.log_scalar(f\"{stage.value}/{metric_name}\", score) log_strings.append(f\"{stage.value}/{metric_name}: {score:.4f}\") # log the full", "val loader to store num samples _, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader) with torch.no_grad():", "debug: bool = False) -> None: assert task.step == 0 or old_model is", "for metric in self.metrics[stage.value].values(): metric.reset() def _log_metrics(self, stage: TrainerStage = TrainerStage.train, exclude: Iterable[str]", "self.model(x) loss = self.criterion(preds, y) # gather info images = self.accelerator.gather(x) y_true =", "loss.mean() loss_val = loss.mean().item() train_tqdm.set_postfix({\"loss\": f\"{loss_val:.4f}\"}) self.logger.log_scalar(\"train/loss_iter\", loss_val) self.logger.log_scalar(\"train/lr\", self.optimizer.param_groups[0][\"lr\"]) self.logger.log_scalar(\"train/time_iter\", elapsed) #", "log 'iter' versions, as for validation losses.append(loss_value) timings.append(elapsed) if return_preds: results.append(data) self.logger.log_scalar(\"test/loss\", np.mean(losses))", "= (time.time() - start) loss_value = loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we do not", "Any: timings = [] losses = defaultdict(list) train_tqdm = progressbar(train_dataloader, epoch=epoch, 
stage=TrainerStage.train.value, disable=not", "-> Trainer: assert stage.value not in self.metrics, \"stage already present in metrics\" self.metrics[stage.value]", "self._compute_metrics(stage=TrainerStage.test) self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude) # iteration on callbacks for the test set (e.g. display", "import BaseLogger from saticl.logging.empty import EmptyLogger from saticl.losses.regularization import MultiModalScaling from saticl.metrics import", "t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader) # not the best place to call it,", "self.old_model(x) kdd_loss = self.criterion_kdd(new_out, old_out) total = seg_loss + self.kdd_lambda * kdd_loss y_true", "with torch.no_grad(): self.model.eval() for i, batch in enumerate(val_tqdm): start = time.time() data =", "values in train_losses.items(): self.logger.log_scalar(f\"train/{name}\", np.mean(values)) self.logger.log_scalar(\"train/time\", np.mean(train_times)) self._log_metrics(stage=TrainerStage.train) def validation_epoch_start(self): self.sample_content.clear() self._reset_metrics(stage=TrainerStage.val) def", "progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main) timings = [] losses = defaultdict(list) with torch.no_grad():", "return train_dataloader, val_dataloader def _update_metrics(self, y_true: torch.Tensor, y_pred: torch.Tensor, stage: TrainerStage = TrainerStage.train)", "\"kdd_loss\": kdd_loss} def validation_epoch(self, epoch: int, val_dataloader: DataLoader) -> Any: val_tqdm = progressbar(val_dataloader,", "= self.accelerator.gather(x) self._store_samples(images[:1], y_pred[:1], y_true[:1]) # update metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred,", "so that we don't have to deal with the batch size later #", "saticl.trainer.callbacks import BaseCallback LOG = get_logger(__name__) class TrainerStage(str, Enum): train = \"train\" val", 
"results=classwise) def _debug_training(self, **kwargs: dict) -> None: LOG.debug(\"[Epoch %2d] - iteration: %d\", self.current_epoch,", "self.model.train() for batch in train_tqdm: start = time.time() self.optimizer.zero_grad() data = self.train_batch(batch=batch) loss", "self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items())) # internal state self.rank = get_rank() self.is_main =", "from saticl.trainer.callbacks import BaseCallback LOG = get_logger(__name__) class TrainerStage(str, Enum): train = \"train\"", "we do not log 'iter' versions, as for validation losses.append(loss_value) timings.append(elapsed) if return_preds:", "not None and self.sample_batches > 0: self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False) else: self.sample_batches", "Any) -> torch.Tensor: # init losses and retrieve x, y x, y =", "values in val_losses.items(): self.logger.log_scalar(f\"val/{name}\", np.mean(values)) self.logger.log_scalar(\"val/time\", np.mean(val_times)) self._log_metrics(stage=TrainerStage.val) def fit(self, train_dataloader: DataLoader, val_dataloader:", "train_dataloader: DataLoader, val_dataloader: DataLoader = None, max_epochs: int = 100): train_dataloader, val_dataloader =", "self.logger = logger or EmptyLogger() # setup metrics, if any self.metrics = dict()", "- start) loss_value = loss.item() test_tqdm.set_postfix({\"loss\": f\"{loss_value:.4f}\"}) # we do not log 'iter'", "the logger step # during validation (also, it's kind of useless) # store", "new_out, new_features = self.model(x) seg_loss = self.criterion(new_out, y) # forward and loss for", "log class-wise results in a single table if classwise: LOG.debug(\"Classwise: %s\", str(classwise)) header", "train_dataloader=train_dataloader) # not the best place to call it, but it's best to", "enumerate(test_tqdm): start = time.time() loss, data = self.test_batch(batch=batch, batch_index=i) elapsed = (time.time() -", "self.metrics = dict() if 
train_metrics is not None: self.add_metrics(stage=TrainerStage.train, metrics=train_metrics) if val_metrics is", "device=seg_loss.device, dtype=seg_loss.dtype) if self.task.step > 0: old_out, _ = self.old_model(x) kdd_loss = self.criterion_kdd(new_out,", "metrics and return losses self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val) return {\"tot_loss\": total, \"seg_loss\": seg_loss, \"kdd_loss\":", "from typing import TYPE_CHECKING, Any, Dict, Iterable import numpy as np import torch" ]
[ "url = contents[\"url\"] return url def woof(update, context): url = get_url() chat_id =", "re import os URL = \"https://random.dog/woof.json\" def get_url(): contents = requests.get(URL).json() url =", "def get_url(): contents = requests.get(URL).json() url = contents[\"url\"] return url def woof(update, context):", "= get_url() chat_id = update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or", "\"NA\" updater = Updater(token=token_id, use_context=True) dp = updater.dispatcher dp.add_handler(CommandHandler(\"woof\", woof)) updater.start_polling() updater.idle() if", "updater = Updater(token=token_id, use_context=True) dp = updater.dispatcher dp.add_handler(CommandHandler(\"woof\", woof)) updater.start_polling() updater.idle() if __name__", "from telegram import update from telegram.ext import Updater, CommandHandler import requests import re", "get_url(): contents = requests.get(URL).json() url = contents[\"url\"] return url def woof(update, context): url", "os URL = \"https://random.dog/woof.json\" def get_url(): contents = requests.get(URL).json() url = contents[\"url\"] return", "os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater = Updater(token=token_id, use_context=True) dp = updater.dispatcher dp.add_handler(CommandHandler(\"woof\", woof)) updater.start_polling()", "url def woof(update, context): url = get_url() chat_id = update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def", "URL = \"https://random.dog/woof.json\" def get_url(): contents = requests.get(URL).json() url = contents[\"url\"] return url", "context.bot.send_photo(chat_id=chat_id, photo=url) def main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater = Updater(token=token_id, use_context=True)", "Updater(token=token_id, use_context=True) dp = updater.dispatcher dp.add_handler(CommandHandler(\"woof\", woof)) updater.start_polling() updater.idle() if __name__ == 
\"__main__\":", "return url def woof(update, context): url = get_url() chat_id = update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url)", "get_url() chat_id = update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\"", "= update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater =", "token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater = Updater(token=token_id, use_context=True) dp = updater.dispatcher dp.add_handler(CommandHandler(\"woof\",", "def main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater = Updater(token=token_id, use_context=True) dp =", "def woof(update, context): url = get_url() chat_id = update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def main():", "= contents[\"url\"] return url def woof(update, context): url = get_url() chat_id = update.message.chat_id", "main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater = Updater(token=token_id, use_context=True) dp = updater.dispatcher", "chat_id = update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater", "woof(update, context): url = get_url() chat_id = update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def main(): token_id", "telegram import update from telegram.ext import Updater, CommandHandler import requests import re import", "contents = requests.get(URL).json() url = contents[\"url\"] return url def woof(update, context): url =", "telegram.ext import Updater, CommandHandler import requests import re import os URL = \"https://random.dog/woof.json\"", "= \"https://random.dog/woof.json\" def get_url(): contents = requests.get(URL).json() url = contents[\"url\"] return url def", "url = get_url() chat_id = 
update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"]", "= Updater(token=token_id, use_context=True) dp = updater.dispatcher dp.add_handler(CommandHandler(\"woof\", woof)) updater.start_polling() updater.idle() if __name__ ==", "requests import re import os URL = \"https://random.dog/woof.json\" def get_url(): contents = requests.get(URL).json()", "import re import os URL = \"https://random.dog/woof.json\" def get_url(): contents = requests.get(URL).json() url", "import requests import re import os URL = \"https://random.dog/woof.json\" def get_url(): contents =", "import Updater, CommandHandler import requests import re import os URL = \"https://random.dog/woof.json\" def", "\"https://random.dog/woof.json\" def get_url(): contents = requests.get(URL).json() url = contents[\"url\"] return url def woof(update,", "contents[\"url\"] return url def woof(update, context): url = get_url() chat_id = update.message.chat_id context.bot.send_photo(chat_id=chat_id,", "context): url = get_url() chat_id = update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def main(): token_id =", "update.message.chat_id context.bot.send_photo(chat_id=chat_id, photo=url) def main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater = Updater(token=token_id,", "Updater, CommandHandler import requests import re import os URL = \"https://random.dog/woof.json\" def get_url():", "= os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater = Updater(token=token_id, use_context=True) dp = updater.dispatcher dp.add_handler(CommandHandler(\"woof\", woof))", "or \"NA\" updater = Updater(token=token_id, use_context=True) dp = updater.dispatcher dp.add_handler(CommandHandler(\"woof\", woof)) updater.start_polling() updater.idle()", "requests.get(URL).json() url = contents[\"url\"] return url def woof(update, context): url = get_url() chat_id", "from telegram.ext import Updater, CommandHandler 
import requests import re import os URL =", "import os URL = \"https://random.dog/woof.json\" def get_url(): contents = requests.get(URL).json() url = contents[\"url\"]", "photo=url) def main(): token_id = os.environ[\"TELEGRAM_WOOF_TOKEN\"] or \"NA\" updater = Updater(token=token_id, use_context=True) dp", "use_context=True) dp = updater.dispatcher dp.add_handler(CommandHandler(\"woof\", woof)) updater.start_polling() updater.idle() if __name__ == \"__main__\": main()", "CommandHandler import requests import re import os URL = \"https://random.dog/woof.json\" def get_url(): contents", "= requests.get(URL).json() url = contents[\"url\"] return url def woof(update, context): url = get_url()", "import update from telegram.ext import Updater, CommandHandler import requests import re import os", "update from telegram.ext import Updater, CommandHandler import requests import re import os URL" ]
[ "'external/test.csv')) # Convert df to values train_values = train.values[:, 1:] test_values = test.values", "encoding number_of_classes = 10 y_train = train.values[:,0] y_train = keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'),", "train_path = os.path.join(data_path, 'processed') def main(): print(os.getcwd()) train = pd.read_csv(os.path.join(data_path, 'external/train.csv')) test =", "as pd import numpy as np import keras import os prefix = '/'", "df to values train_values = train.values[:, 1:] test_values = test.values # Reshape and", "y_train = train.values[:,0] y_train = keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'), X_train) np.save(os.path.join(train_path, 'X_test.npy'), X_test)", "import os prefix = '/' if \"IS_CONTAINER\" in os.environ else './' data_path =", "= os.path.join(prefix, 'opt/ml/input/data') train_path = os.path.join(data_path, 'processed') def main(): print(os.getcwd()) train = pd.read_csv(os.path.join(data_path,", "data X_train = reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values) # one hot encoding number_of_classes =", "'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0], 28, 28, 1) array = array.astype(", "os.path.join(data_path, 'processed') def main(): print(os.getcwd()) train = pd.read_csv(os.path.join(data_path, 'external/train.csv')) test = pd.read_csv(os.path.join(data_path, 'external/test.csv'))", "import keras import os prefix = '/' if \"IS_CONTAINER\" in os.environ else './'", "= pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert df to values train_values = train.values[:, 1:] test_values", "import numpy as np import keras import os prefix = '/' if \"IS_CONTAINER\"", "train.values[:,0] y_train = keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'), X_train) 
np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'),", "X_train = reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values) # one hot encoding number_of_classes = 10", "# Convert df to values train_values = train.values[:, 1:] test_values = test.values #", "'X_train.npy'), X_train) np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0],", "1:] test_values = test.values # Reshape and normalize training data X_train = reshapeAndNormalizeXValues(train_values)", "X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0], 28, 28, 1) array", "test_values = test.values # Reshape and normalize training data X_train = reshapeAndNormalizeXValues(train_values) X_test", "np.save(os.path.join(train_path, 'X_train.npy'), X_train) np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array): array =", "= keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'), X_train) np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train) def", "pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert df to values train_values = train.values[:, 1:] test_values =", "array.reshape(array.shape[0], 28, 28, 1) array = array.astype( 'float32' ) array = array /", "os prefix = '/' if \"IS_CONTAINER\" in os.environ else './' data_path = os.path.join(prefix,", "# Reshape and normalize training data X_train = reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values) #", "Convert df to values train_values = train.values[:, 1:] test_values = test.values # Reshape", "def 
reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0], 28, 28, 1) array = array.astype( 'float32' )", "number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'), X_train) np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array): array", "hot encoding number_of_classes = 10 y_train = train.values[:,0] y_train = keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path,", "X_test = reshapeAndNormalizeXValues(test_values) # one hot encoding number_of_classes = 10 y_train = train.values[:,0]", "= reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values) # one hot encoding number_of_classes = 10 y_train", "main(): print(os.getcwd()) train = pd.read_csv(os.path.join(data_path, 'external/train.csv')) test = pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert df", "np.save(os.path.join(train_path, 'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0], 28, 28, 1) array =", "28, 1) array = array.astype( 'float32' ) array = array / 255.0 return", "in os.environ else './' data_path = os.path.join(prefix, 'opt/ml/input/data') train_path = os.path.join(data_path, 'processed') def", "'./' data_path = os.path.join(prefix, 'opt/ml/input/data') train_path = os.path.join(data_path, 'processed') def main(): print(os.getcwd()) train", "train_values = train.values[:, 1:] test_values = test.values # Reshape and normalize training data", "array.astype( 'float32' ) array = array / 255.0 return array if __name__ ==", "normalize training data X_train = reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values) # one hot encoding", "'float32' ) array = array / 255.0 return array if __name__ == \"__main__\":", "reshapeAndNormalizeXValues(test_values) # one hot encoding number_of_classes = 10 y_train = train.values[:,0] 
y_train =", "numpy as np import keras import os prefix = '/' if \"IS_CONTAINER\" in", "test = pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert df to values train_values = train.values[:, 1:]", "one hot encoding number_of_classes = 10 y_train = train.values[:,0] y_train = keras.utils.to_categorical(y_train, number_of_classes)", "= pd.read_csv(os.path.join(data_path, 'external/train.csv')) test = pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert df to values train_values", "<reponame>fbomb111/full-stack-mlops import pandas as pd import numpy as np import keras import os", "'processed') def main(): print(os.getcwd()) train = pd.read_csv(os.path.join(data_path, 'external/train.csv')) test = pd.read_csv(os.path.join(data_path, 'external/test.csv')) #", "train.values[:, 1:] test_values = test.values # Reshape and normalize training data X_train =", "and normalize training data X_train = reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values) # one hot", "train = pd.read_csv(os.path.join(data_path, 'external/train.csv')) test = pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert df to values", "'/' if \"IS_CONTAINER\" in os.environ else './' data_path = os.path.join(prefix, 'opt/ml/input/data') train_path =", "array = array.reshape(array.shape[0], 28, 28, 1) array = array.astype( 'float32' ) array =", "= array.astype( 'float32' ) array = array / 255.0 return array if __name__", "X_train) np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0], 28,", "test.values # Reshape and normalize training data X_train = reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values)", "'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0], 
28, 28, 1)", "= 10 y_train = train.values[:,0] y_train = keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'), X_train) np.save(os.path.join(train_path,", "\"IS_CONTAINER\" in os.environ else './' data_path = os.path.join(prefix, 'opt/ml/input/data') train_path = os.path.join(data_path, 'processed')", "y_train) def reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0], 28, 28, 1) array = array.astype( 'float32'", "pd import numpy as np import keras import os prefix = '/' if", "import pandas as pd import numpy as np import keras import os prefix", "reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0], 28, 28, 1) array = array.astype( 'float32' ) array", "# one hot encoding number_of_classes = 10 y_train = train.values[:,0] y_train = keras.utils.to_categorical(y_train,", "= array.reshape(array.shape[0], 28, 28, 1) array = array.astype( 'float32' ) array = array", "keras import os prefix = '/' if \"IS_CONTAINER\" in os.environ else './' data_path", "Reshape and normalize training data X_train = reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values) # one", "y_train = keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'), X_train) np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train)", "values train_values = train.values[:, 1:] test_values = test.values # Reshape and normalize training", "print(os.getcwd()) train = pd.read_csv(os.path.join(data_path, 'external/train.csv')) test = pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert df to", "keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'), X_train) np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array):", "os.path.join(prefix, 
'opt/ml/input/data') train_path = os.path.join(data_path, 'processed') def main(): print(os.getcwd()) train = pd.read_csv(os.path.join(data_path, 'external/train.csv'))", "if \"IS_CONTAINER\" in os.environ else './' data_path = os.path.join(prefix, 'opt/ml/input/data') train_path = os.path.join(data_path,", "number_of_classes = 10 y_train = train.values[:,0] y_train = keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'), X_train)", "'opt/ml/input/data') train_path = os.path.join(data_path, 'processed') def main(): print(os.getcwd()) train = pd.read_csv(os.path.join(data_path, 'external/train.csv')) test", "= os.path.join(data_path, 'processed') def main(): print(os.getcwd()) train = pd.read_csv(os.path.join(data_path, 'external/train.csv')) test = pd.read_csv(os.path.join(data_path,", "def main(): print(os.getcwd()) train = pd.read_csv(os.path.join(data_path, 'external/train.csv')) test = pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert", "as np import keras import os prefix = '/' if \"IS_CONTAINER\" in os.environ", "28, 28, 1) array = array.astype( 'float32' ) array = array / 255.0", "= reshapeAndNormalizeXValues(test_values) # one hot encoding number_of_classes = 10 y_train = train.values[:,0] y_train", "= train.values[:, 1:] test_values = test.values # Reshape and normalize training data X_train", "= test.values # Reshape and normalize training data X_train = reshapeAndNormalizeXValues(train_values) X_test =", "'external/train.csv')) test = pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert df to values train_values = train.values[:,", "1) array = array.astype( 'float32' ) array = array / 255.0 return array", "else './' data_path = os.path.join(prefix, 'opt/ml/input/data') train_path = os.path.join(data_path, 'processed') def main(): print(os.getcwd())", "= train.values[:,0] y_train = keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 
'X_train.npy'), X_train) np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path,", "pandas as pd import numpy as np import keras import os prefix =", "data_path = os.path.join(prefix, 'opt/ml/input/data') train_path = os.path.join(data_path, 'processed') def main(): print(os.getcwd()) train =", "prefix = '/' if \"IS_CONTAINER\" in os.environ else './' data_path = os.path.join(prefix, 'opt/ml/input/data')", "to values train_values = train.values[:, 1:] test_values = test.values # Reshape and normalize", "pd.read_csv(os.path.join(data_path, 'external/train.csv')) test = pd.read_csv(os.path.join(data_path, 'external/test.csv')) # Convert df to values train_values =", "10 y_train = train.values[:,0] y_train = keras.utils.to_categorical(y_train, number_of_classes) np.save(os.path.join(train_path, 'X_train.npy'), X_train) np.save(os.path.join(train_path, 'X_test.npy'),", ") array = array / 255.0 return array if __name__ == \"__main__\": main()", "np import keras import os prefix = '/' if \"IS_CONTAINER\" in os.environ else", "reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values) # one hot encoding number_of_classes = 10 y_train =", "training data X_train = reshapeAndNormalizeXValues(train_values) X_test = reshapeAndNormalizeXValues(test_values) # one hot encoding number_of_classes", "array = array.astype( 'float32' ) array = array / 255.0 return array if", "os.environ else './' data_path = os.path.join(prefix, 'opt/ml/input/data') train_path = os.path.join(data_path, 'processed') def main():", "np.save(os.path.join(train_path, 'X_test.npy'), X_test) np.save(os.path.join(train_path, 'y_train.npy'), y_train) def reshapeAndNormalizeXValues(array): array = array.reshape(array.shape[0], 28, 28,", "= '/' if \"IS_CONTAINER\" in os.environ else './' data_path = os.path.join(prefix, 'opt/ml/input/data') train_path" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "python # This work was created by participants in the DataONE project, and", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "License. # You may obtain a copy of the License at # #", "the specific language governing permissions and # limitations under the License. \"\"\"Generate random", "format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\") ) self._format_id_list = sorted(format_id_set) return random.choice(self._format_id_list) generate = Generate()", "law or agreed to in writing, software # distributed under the License is", "the DataONE project, and is # jointly copyrighted by participating institutions in DataONE.", "\"objectFormatList_v2_0.xml\" ).objectFormat } # Remove the formatIds for object types that are parsed", "the License for the specific language governing permissions and # limitations under the", "our web site at http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed", "compliance with the License. # You may obtain a copy of the License", "#!/usr/bin/env python # This work was created by participants in the DataONE project,", "the formatIds for object types that are parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -=", "} # Remove the formatIds for object types that are parsed by GMN", "the License. 
\"\"\"Generate random formatId.\"\"\" import random import d1_common.const import d1_test.test_files class Generate(object):", "# # Copyright 2009-2019 DataONE # # Licensed under the Apache License, Version", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "and is # jointly copyrighted by participating institutions in DataONE. For # more", "this file except in compliance with the License. # You may obtain a", "class Generate(object): def __init__(self): self._format_id_list = None def __call__(self): if self._format_id_list is None:", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "This work was created by participants in the DataONE project, and is #", "you may not use this file except in compliance with the License. #", "governing permissions and # limitations under the License. \"\"\"Generate random formatId.\"\"\" import random", "project, and is # jointly copyrighted by participating institutions in DataONE. For #", "for the specific language governing permissions and # limitations under the License. \"\"\"Generate", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "2009-2019 DataONE # # Licensed under the Apache License, Version 2.0 (the \"License\");", "formatId.\"\"\" import random import d1_common.const import d1_test.test_files class Generate(object): def __init__(self): self._format_id_list =", "in the DataONE project, and is # jointly copyrighted by participating institutions in", "more information on DataONE, see our web site at http://dataone.org. # # Copyright", "ANY KIND, either express or implied. # See the License for the specific", "see our web site at http://dataone.org. # # Copyright 2009-2019 DataONE # #", "limitations under the License. \"\"\"Generate random formatId.\"\"\" import random import d1_common.const import d1_test.test_files", "by participating institutions in DataONE. 
For # more information on DataONE, see our", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "= { o.formatId for o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat } # Remove the", "use this file except in compliance with the License. # You may obtain", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat } # Remove the formatIds for object types", "# Remove the formatIds for object types that are parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID)", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "__call__(self): if self._format_id_list is None: format_id_set = { o.formatId for o in d1_test.test_files.load_xml_to_pyxb(", "See the License for the specific language governing permissions and # limitations under", "# This work was created by participants in the DataONE project, and is", "institutions in DataONE. For # more information on DataONE, see our web site", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "site at http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed under the", "specific language governing permissions and # limitations under the License. \"\"\"Generate random formatId.\"\"\"", "DataONE project, and is # jointly copyrighted by participating institutions in DataONE. 
For", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "for o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat } # Remove the formatIds for object", "for object types that are parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\")", "DataONE, see our web site at http://dataone.org. # # Copyright 2009-2019 DataONE #", "OF ANY KIND, either express or implied. # See the License for the", "participants in the DataONE project, and is # jointly copyrighted by participating institutions", ").objectFormat } # Remove the formatIds for object types that are parsed by", "2.0 (the \"License\"); # you may not use this file except in compliance", "# you may not use this file except in compliance with the License.", "copyrighted by participating institutions in DataONE. For # more information on DataONE, see", "and # limitations under the License. \"\"\"Generate random formatId.\"\"\" import random import d1_common.const", "by participants in the DataONE project, and is # jointly copyrighted by participating", "agreed to in writing, software # distributed under the License is distributed on", "on DataONE, see our web site at http://dataone.org. # # Copyright 2009-2019 DataONE", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "permissions and # limitations under the License. 
\"\"\"Generate random formatId.\"\"\" import random import", "Remove the formatIds for object types that are parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set", "(the \"License\"); # you may not use this file except in compliance with", "random formatId.\"\"\" import random import d1_common.const import d1_test.test_files class Generate(object): def __init__(self): self._format_id_list", "in DataONE. For # more information on DataONE, see our web site at", "# # Unless required by applicable law or agreed to in writing, software", "self._format_id_list is None: format_id_set = { o.formatId for o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat", "def __init__(self): self._format_id_list = None def __call__(self): if self._format_id_list is None: format_id_set =", "# jointly copyrighted by participating institutions in DataONE. For # more information on", "express or implied. # See the License for the specific language governing permissions", "parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\") ) self._format_id_list = sorted(format_id_set) return", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "self._format_id_list = None def __call__(self): if self._format_id_list is None: format_id_set = { o.formatId", "except in compliance with the License. # You may obtain a copy of", "web site at http://dataone.org. 
# # Copyright 2009-2019 DataONE # # Licensed under", "by applicable law or agreed to in writing, software # distributed under the", "if self._format_id_list is None: format_id_set = { o.formatId for o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\"", "types that are parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\") ) self._format_id_list", "For # more information on DataONE, see our web site at http://dataone.org. #", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "__init__(self): self._format_id_list = None def __call__(self): if self._format_id_list is None: format_id_set = {", "at http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed under the Apache", "either express or implied. # See the License for the specific language governing", "formatIds for object types that are parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set(", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "Copyright 2009-2019 DataONE # # Licensed under the Apache License, Version 2.0 (the", "may not use this file except in compliance with the License. 
# You", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat } # Remove the formatIds for object types that are", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "= None def __call__(self): if self._format_id_list is None: format_id_set = { o.formatId for", "o.formatId for o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat } # Remove the formatIds for", "are parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\") ) self._format_id_list = sorted(format_id_set)", "by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\") ) self._format_id_list = sorted(format_id_set) return random.choice(self._format_id_list)", "file except in compliance with the License. # You may obtain a copy", "http://dataone.org. # # Copyright 2009-2019 DataONE # # Licensed under the Apache License,", "language governing permissions and # limitations under the License. \"\"\"Generate random formatId.\"\"\" import", "import d1_test.test_files class Generate(object): def __init__(self): self._format_id_list = None def __call__(self): if self._format_id_list", "None: format_id_set = { o.formatId for o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat } #", "is None: format_id_set = { o.formatId for o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat }", "# limitations under the License. 
\"\"\"Generate random formatId.\"\"\" import random import d1_common.const import", "Generate(object): def __init__(self): self._format_id_list = None def __call__(self): if self._format_id_list is None: format_id_set", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "under the License. \"\"\"Generate random formatId.\"\"\" import random import d1_common.const import d1_test.test_files class", "License for the specific language governing permissions and # limitations under the License.", "{ o.formatId for o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat } # Remove the formatIds", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# Copyright 2009-2019 DataONE # # Licensed under the Apache License, Version 2.0", "participating institutions in DataONE. For # more information on DataONE, see our web", "the License. # You may obtain a copy of the License at #", "None def __call__(self): if self._format_id_list is None: format_id_set = { o.formatId for o", "d1_test.test_files class Generate(object): def __init__(self): self._format_id_list = None def __call__(self): if self._format_id_list is", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "work was created by participants in the DataONE project, and is # jointly", "object types that are parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\") )", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\") ) self._format_id_list = sorted(format_id_set) return random.choice(self._format_id_list) generate", "implied. 
# See the License for the specific language governing permissions and #", "# more information on DataONE, see our web site at http://dataone.org. # #", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "import random import d1_common.const import d1_test.test_files class Generate(object): def __init__(self): self._format_id_list = None", "import d1_common.const import d1_test.test_files class Generate(object): def __init__(self): self._format_id_list = None def __call__(self):", "applicable law or agreed to in writing, software # distributed under the License", "that are parsed by GMN format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\") ) self._format_id_list =", "is # jointly copyrighted by participating institutions in DataONE. For # more information", "DataONE. For # more information on DataONE, see our web site at http://dataone.org.", "created by participants in the DataONE project, and is # jointly copyrighted by", "\"\"\"Generate random formatId.\"\"\" import random import d1_common.const import d1_test.test_files class Generate(object): def __init__(self):", "DataONE # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "def __call__(self): if self._format_id_list is None: format_id_set = { o.formatId for o in", "or agreed to in writing, software # distributed under the License is distributed", "d1_common.const import d1_test.test_files class Generate(object): def __init__(self): self._format_id_list = None def __call__(self): if", "or implied. # See the License for the specific language governing permissions and", "information on DataONE, see our web site at http://dataone.org. 
# # Copyright 2009-2019", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "jointly copyrighted by participating institutions in DataONE. For # more information on DataONE,", "was created by participants in the DataONE project, and is # jointly copyrighted", "with the License. # You may obtain a copy of the License at", "in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat } # Remove the formatIds for object types that", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "License. \"\"\"Generate random formatId.\"\"\" import random import d1_common.const import d1_test.test_files class Generate(object): def", "format_id_set.remove(d1_common.const.ORE_FORMAT_ID) format_id_set -= set( d1_test.test_files.load_json(\"scimeta_format_id_list.json\") ) self._format_id_list = sorted(format_id_set) return random.choice(self._format_id_list) generate =", "random import d1_common.const import d1_test.test_files class Generate(object): def __init__(self): self._format_id_list = None def", "format_id_set = { o.formatId for o in d1_test.test_files.load_xml_to_pyxb( \"objectFormatList_v2_0.xml\" ).objectFormat } # Remove", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "a wall at x=1.1m.\"\"\" # Create robot q_init = np.array([[math.pi / 2], [-math.pi", "(3N in positive x-direction) f_des = np.array([[3.0], [0.0], [0.0], [0.0], [0.0], [0.0]]) controller.set_force_target(f_des)", "in task space (3N in positive x-direction) f_des = np.array([[3.0], [0.0], [0.0], [0.0],", "controller = Control(robot) # Create desired force in task space (3N in positive", "# Create desired force in task space (3N in positive x-direction) f_des =", "[0.0], [0.0]]) controller.set_force_target(f_des) # Run animation joint_of_interest = 1 sim = Simulator(robot, controller,", "control and simulates contact with a wall at x=1.1m.\"\"\" # Create robot q_init", "src.controllers.task_space_force_controller import * \"\"\" This script uses force control and simulates contact with", "np.array([[math.pi / 2], [-math.pi / 2]]) robot = TwoDofArm() robot.set_q_init(q_init) # Create controller", "script uses force control and simulates contact with a wall at x=1.1m.\"\"\" #", "* from src.arms import * from src.controllers.task_space_force_controller import * \"\"\" This script uses", "import * from src.controllers.task_space_force_controller import * \"\"\" This script uses force control and", "/ 2]]) robot = TwoDofArm() robot.set_q_init(q_init) # Create controller controller = Control(robot) #", "TwoDofArm() robot.set_q_init(q_init) # Create controller controller = Control(robot) # Create desired force in", "desired force in task space (3N in positive x-direction) f_des = np.array([[3.0], [0.0],", "task space (3N in positive x-direction) f_des = np.array([[3.0], [0.0], [0.0], [0.0], [0.0],", "import * from src.arms import * from src.controllers.task_space_force_controller import * \"\"\" This script", "space (3N in positive x-direction) f_des = np.array([[3.0], [0.0], [0.0], [0.0], [0.0], [0.0]])", "Create desired force in task space (3N in positive x-direction) f_des = np.array([[3.0],", "= Control(robot) # Create desired force in task space (3N in positive 
x-direction)", "This script uses force control and simulates contact with a wall at x=1.1m.\"\"\"", "x-direction) f_des = np.array([[3.0], [0.0], [0.0], [0.0], [0.0], [0.0]]) controller.set_force_target(f_des) # Run animation", "with a wall at x=1.1m.\"\"\" # Create robot q_init = np.array([[math.pi / 2],", "np.array([[3.0], [0.0], [0.0], [0.0], [0.0], [0.0]]) controller.set_force_target(f_des) # Run animation joint_of_interest = 1", "from src.controllers.task_space_force_controller import * \"\"\" This script uses force control and simulates contact", "/ 2], [-math.pi / 2]]) robot = TwoDofArm() robot.set_q_init(q_init) # Create controller controller", "at x=1.1m.\"\"\" # Create robot q_init = np.array([[math.pi / 2], [-math.pi / 2]])", "and simulates contact with a wall at x=1.1m.\"\"\" # Create robot q_init =", "x=1.1m.\"\"\" # Create robot q_init = np.array([[math.pi / 2], [-math.pi / 2]]) robot", "controller.set_force_target(f_des) # Run animation joint_of_interest = 1 sim = Simulator(robot, controller, joint_of_interest) sim.simulate()", "Control(robot) # Create desired force in task space (3N in positive x-direction) f_des", "robot = TwoDofArm() robot.set_q_init(q_init) # Create controller controller = Control(robot) # Create desired", "import math from src.Simulator import * from src.arms import * from src.controllers.task_space_force_controller import", "* \"\"\" This script uses force control and simulates contact with a wall", "from src.arms import * from src.controllers.task_space_force_controller import * \"\"\" This script uses force", "2], [-math.pi / 2]]) robot = TwoDofArm() robot.set_q_init(q_init) # Create controller controller =", "robot.set_q_init(q_init) # Create controller controller = Control(robot) # Create desired force in task", "[0.0], [0.0], [0.0]]) controller.set_force_target(f_des) # Run animation joint_of_interest = 1 sim = Simulator(robot,", "\"\"\" This script uses force control and simulates contact with a wall at", "[0.0], [0.0], 
[0.0], [0.0]]) controller.set_force_target(f_des) # Run animation joint_of_interest = 1 sim =", "positive x-direction) f_des = np.array([[3.0], [0.0], [0.0], [0.0], [0.0], [0.0]]) controller.set_force_target(f_des) # Run", "src.Simulator import * from src.arms import * from src.controllers.task_space_force_controller import * \"\"\" This", "robot q_init = np.array([[math.pi / 2], [-math.pi / 2]]) robot = TwoDofArm() robot.set_q_init(q_init)", "contact with a wall at x=1.1m.\"\"\" # Create robot q_init = np.array([[math.pi /", "Create robot q_init = np.array([[math.pi / 2], [-math.pi / 2]]) robot = TwoDofArm()", "import * \"\"\" This script uses force control and simulates contact with a", "# Create controller controller = Control(robot) # Create desired force in task space", "2]]) robot = TwoDofArm() robot.set_q_init(q_init) # Create controller controller = Control(robot) # Create", "# Create robot q_init = np.array([[math.pi / 2], [-math.pi / 2]]) robot =", "* from src.controllers.task_space_force_controller import * \"\"\" This script uses force control and simulates", "wall at x=1.1m.\"\"\" # Create robot q_init = np.array([[math.pi / 2], [-math.pi /", "force control and simulates contact with a wall at x=1.1m.\"\"\" # Create robot", "from src.Simulator import * from src.arms import * from src.controllers.task_space_force_controller import * \"\"\"", "in positive x-direction) f_des = np.array([[3.0], [0.0], [0.0], [0.0], [0.0], [0.0]]) controller.set_force_target(f_des) #", "force in task space (3N in positive x-direction) f_des = np.array([[3.0], [0.0], [0.0],", "f_des = np.array([[3.0], [0.0], [0.0], [0.0], [0.0], [0.0]]) controller.set_force_target(f_des) # Run animation joint_of_interest", "[-math.pi / 2]]) robot = TwoDofArm() robot.set_q_init(q_init) # Create controller controller = Control(robot)", "src.arms import * from src.controllers.task_space_force_controller import * \"\"\" This script uses force control", "Create controller controller = 
Control(robot) # Create desired force in task space (3N", "simulates contact with a wall at x=1.1m.\"\"\" # Create robot q_init = np.array([[math.pi", "[0.0], [0.0], [0.0], [0.0], [0.0]]) controller.set_force_target(f_des) # Run animation joint_of_interest = 1 sim", "math from src.Simulator import * from src.arms import * from src.controllers.task_space_force_controller import *", "controller controller = Control(robot) # Create desired force in task space (3N in", "[0.0]]) controller.set_force_target(f_des) # Run animation joint_of_interest = 1 sim = Simulator(robot, controller, joint_of_interest)", "q_init = np.array([[math.pi / 2], [-math.pi / 2]]) robot = TwoDofArm() robot.set_q_init(q_init) #", "= np.array([[math.pi / 2], [-math.pi / 2]]) robot = TwoDofArm() robot.set_q_init(q_init) # Create", "= np.array([[3.0], [0.0], [0.0], [0.0], [0.0], [0.0]]) controller.set_force_target(f_des) # Run animation joint_of_interest =", "uses force control and simulates contact with a wall at x=1.1m.\"\"\" # Create", "= TwoDofArm() robot.set_q_init(q_init) # Create controller controller = Control(robot) # Create desired force" ]
[ "get laplacian matrix: L = csgraph.laplacian(A, normed=True) # spectral decomposition: eigval, eigvec =", "clusters Attributes ---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2, **kwargs): self.__K = n_clusters", "data: numpy.ndarray Training set as N-by-D numpy.ndarray Returns ---------- None \"\"\" # TODO", "range(K): plt.scatter(X[category == k][:,0], X[category == k][:,1], c=color[k], label=labels[k]) plt.xlabel('X') plt.ylabel('Y') plt.legend() plt.title('Spectral", "of clusters Attributes ---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2, **kwargs): self.__K =", "---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2, **kwargs): self.__K = n_clusters self.__labels =", "as np from sklearn.cluster import KMeans import matplotlib.pyplot as plt class SpectralClustering(object): \"\"\"", "---------- result: numpy.ndarray data labels as (N, ) numpy.ndarray \"\"\" return np.copy(self.__labels) def", "random_state=42, visualize=False): \"\"\" Generate dataset for spectral clustering Parameters ---------- visualize: boolean Whether", "from sklearn.datasets import make_moons X, y = make_moons(N, noise=noise, random_state=random_state) if visualize: fig,", "Returns ---------- result: numpy.ndarray data labels as (N, ) numpy.ndarray \"\"\" return np.copy(self.__labels)", "features: idx_k_smallest = np.where(eigval < np.partition(eigval, self.__K)[self.__K]) features = np.hstack([eigvec[:, i] for i", "set as N-by-D numpy.ndarray Returns ---------- result: numpy.ndarray data labels as (N, )", "Parameters ---------- visualize: boolean Whether to visualize the generated data \"\"\" from sklearn.datasets", "plt.show() return X if __name__ == '__main__': # create dataset: K = 2", "Training set as N-by-D numpy.ndarray Returns ---------- None \"\"\" # TODO 01: implement", "cluster using KMeans++ k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) 
k_means.fit(features) # get cluster ids:", "implement SpectralClustering fit from sklearn.neighbors import kneighbors_graph from sklearn.metrics import pairwise_distances from scipy.sparse", "self.__K = n_clusters self.__labels = None def fit(self, data): \"\"\" Estimate the K", "random_state=random_state) if visualize: fig, ax = plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for Spectral Clustering', fontsize=18,", "<filename>Homework/Homework III/src/SpectralClustering.py<gh_stars>1-10 # 文件功能: 实现 Spectral Clustering 算法 import numpy as np from", "# visualize: color = ['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}' for k in range(K)] for", "dataset for spectral clustering Parameters ---------- visualize: boolean Whether to visualize the generated", "matplotlib.pyplot as plt class SpectralClustering(object): \"\"\" SpectralClustering Parameters ---------- n_clusters: int Number of", "the K centroids Parameters ---------- data: numpy.ndarray Training set as N-by-D numpy.ndarray Returns", "if __name__ == '__main__': # create dataset: K = 2 X = generate_dataset(visualize=False)", "result: numpy.ndarray data labels as (N, ) numpy.ndarray \"\"\" return np.copy(self.__labels) def generate_dataset(N=300,", "scipy.sparse import csgraph from scipy.sparse import linalg N, _ = data.shape # create", "---------- visualize: boolean Whether to visualize the generated data \"\"\" from sklearn.datasets import", "A = np.exp(-A**2/(2*gamma**2)) # get laplacian matrix: L = csgraph.laplacian(A, normed=True) # spectral", "# cluster using KMeans++ k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) k_means.fit(features) # get cluster", "sklearn.cluster import KMeans import matplotlib.pyplot as plt class SpectralClustering(object): \"\"\" SpectralClustering Parameters ----------", "cluster ids: self.__labels = k_means.labels_ def predict(self, data): \"\"\" Get cluster labels Parameters", "numpy.ndarray Returns ---------- result: numpy.ndarray 
data labels as (N, ) numpy.ndarray \"\"\" return", "for k in range(K)] for k in range(K): plt.scatter(X[category == k][:,0], X[category ==", "get cluster ids: self.__labels = k_means.labels_ def predict(self, data): \"\"\" Get cluster labels", "set as N-by-D numpy.ndarray Returns ---------- None \"\"\" # TODO 01: implement SpectralClustering", "None \"\"\" # TODO 01: implement SpectralClustering fit from sklearn.neighbors import kneighbors_graph from", "(N, ) numpy.ndarray \"\"\" return np.copy(self.__labels) def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False): \"\"\" Generate", "the generated data \"\"\" from sklearn.datasets import make_moons X, y = make_moons(N, noise=noise,", "from sklearn.neighbors import kneighbors_graph from sklearn.metrics import pairwise_distances from scipy.sparse import csgraph from", "generate_dataset(N=300, noise=0.07, random_state=42, visualize=False): \"\"\" Generate dataset for spectral clustering Parameters ---------- visualize:", "n_clusters=2, **kwargs): self.__K = n_clusters self.__labels = None def fit(self, data): \"\"\" Estimate", "csgraph from scipy.sparse import linalg N, _ = data.shape # create affinity matrix", "import KMeans import matplotlib.pyplot as plt class SpectralClustering(object): \"\"\" SpectralClustering Parameters ---------- n_clusters:", "# k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2, **kwargs): self.__K = n_clusters self.__labels = None def", "numpy.ndarray Training set as N-by-D numpy.ndarray Returns ---------- None \"\"\" # TODO 01:", "SpectralClustering(object): \"\"\" SpectralClustering Parameters ---------- n_clusters: int Number of clusters Attributes ---------- \"\"\"", "np.var(A)/4 A = np.exp(-A**2/(2*gamma**2)) # get laplacian matrix: L = csgraph.laplacian(A, normed=True) #", "ax = plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for Spectral Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:, 0], X[:,", "estimation gamma = np.var(A)/4 
A = np.exp(-A**2/(2*gamma**2)) # get laplacian matrix: L =", "plt.scatter(X[category == k][:,0], X[category == k][:,1], c=color[k], label=labels[k]) plt.xlabel('X') plt.ylabel('Y') plt.legend() plt.title('Spectral Clustering", "clustering estimation: sc = SpectralClustering(n_clusters=K) sc.fit(X) category = sc.predict(X) # visualize: color =", "# create dataset: K = 2 X = generate_dataset(visualize=False) # spectral clustering estimation:", "\"\"\" SpectralClustering Parameters ---------- n_clusters: int Number of clusters Attributes ---------- \"\"\" #", "= pairwise_distances(data) # TODO: use better gamma estimation gamma = np.var(A)/4 A =", "cmap='viridis') plt.show() return X if __name__ == '__main__': # create dataset: K =", "create dataset: K = 2 X = generate_dataset(visualize=False) # spectral clustering estimation: sc", "= generate_dataset(visualize=False) # spectral clustering estimation: sc = SpectralClustering(n_clusters=K) sc.fit(X) category = sc.predict(X)", "noise=0.07, random_state=42, visualize=False): \"\"\" Generate dataset for spectral clustering Parameters ---------- visualize: boolean", "for connectivity: A = pairwise_distances(data) # TODO: use better gamma estimation gamma =", "= np.where(eigval < np.partition(eigval, self.__K)[self.__K]) features = np.hstack([eigvec[:, i] for i in idx_k_smallest])", "fit(self, data): \"\"\" Estimate the K centroids Parameters ---------- data: numpy.ndarray Training set", "import linalg N, _ = data.shape # create affinity matrix -- kNN for", "np.copy(self.__labels) def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False): \"\"\" Generate dataset for spectral clustering Parameters", "KMeans import matplotlib.pyplot as plt class SpectralClustering(object): \"\"\" SpectralClustering Parameters ---------- n_clusters: int", "Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='viridis') plt.show() return X", "A = pairwise_distances(data) # TODO: use better 
gamma estimation gamma = np.var(A)/4 A", "Testing set as N-by-D numpy.ndarray Returns ---------- result: numpy.ndarray data labels as (N,", "for Spectral Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='viridis') plt.show()", "self.__K)[self.__K]) features = np.hstack([eigvec[:, i] for i in idx_k_smallest]) # cluster using KMeans++", "__name__ == '__main__': # create dataset: K = 2 X = generate_dataset(visualize=False) #", "= csgraph.laplacian(A, normed=True) # spectral decomposition: eigval, eigvec = np.linalg.eig(L) # get features:", "category = sc.predict(X) # visualize: color = ['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}' for k", "n_clusters self.__labels = None def fit(self, data): \"\"\" Estimate the K centroids Parameters", "III/src/SpectralClustering.py<gh_stars>1-10 # 文件功能: 实现 Spectral Clustering 算法 import numpy as np from sklearn.cluster", "def __init__(self, n_clusters=2, **kwargs): self.__K = n_clusters self.__labels = None def fit(self, data):", "clustering Parameters ---------- visualize: boolean Whether to visualize the generated data \"\"\" from", "L = csgraph.laplacian(A, normed=True) # spectral decomposition: eigval, eigvec = np.linalg.eig(L) # get", "color = ['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}' for k in range(K)] for k in", "gamma = np.var(A)/4 A = np.exp(-A**2/(2*gamma**2)) # get laplacian matrix: L = csgraph.laplacian(A,", "in idx_k_smallest]) # cluster using KMeans++ k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) k_means.fit(features) #", "self.__labels = None def fit(self, data): \"\"\" Estimate the K centroids Parameters ----------", "from sklearn.metrics import pairwise_distances from scipy.sparse import csgraph from scipy.sparse import linalg N,", "# TODO: use better gamma estimation gamma = np.var(A)/4 A = np.exp(-A**2/(2*gamma**2)) #", "import csgraph from scipy.sparse import linalg N, _ = data.shape # create affinity", 
"---------- None \"\"\" # TODO 01: implement SpectralClustering fit from sklearn.neighbors import kneighbors_graph", "# create affinity matrix -- kNN for connectivity: A = pairwise_distances(data) # TODO:", "kNN for connectivity: A = pairwise_distances(data) # TODO: use better gamma estimation gamma", "get features: idx_k_smallest = np.where(eigval < np.partition(eigval, self.__K)[self.__K]) features = np.hstack([eigvec[:, i] for", "if visualize: fig, ax = plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for Spectral Clustering', fontsize=18, fontweight='demi')", "import matplotlib.pyplot as plt class SpectralClustering(object): \"\"\" SpectralClustering Parameters ---------- n_clusters: int Number", "affinity matrix -- kNN for connectivity: A = pairwise_distances(data) # TODO: use better", "sklearn.metrics import pairwise_distances from scipy.sparse import csgraph from scipy.sparse import linalg N, _", "cluster labels Parameters ---------- data: numpy.ndarray Testing set as N-by-D numpy.ndarray Returns ----------", "文件功能: 实现 Spectral Clustering 算法 import numpy as np from sklearn.cluster import KMeans", "idx_k_smallest = np.where(eigval < np.partition(eigval, self.__K)[self.__K]) features = np.hstack([eigvec[:, i] for i in", "N-by-D numpy.ndarray Returns ---------- None \"\"\" # TODO 01: implement SpectralClustering fit from", "return X if __name__ == '__main__': # create dataset: K = 2 X", "Clustering 算法 import numpy as np from sklearn.cluster import KMeans import matplotlib.pyplot as", "np from sklearn.cluster import KMeans import matplotlib.pyplot as plt class SpectralClustering(object): \"\"\" SpectralClustering", "def fit(self, data): \"\"\" Estimate the K centroids Parameters ---------- data: numpy.ndarray Training", "for i in idx_k_smallest]) # cluster using KMeans++ k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6)", "ax.set_title('Test Dataset for Spectral Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:, 0], X[:, 1], c=y, 
s=50,", "= SpectralClustering(n_clusters=K) sc.fit(X) category = sc.predict(X) # visualize: color = ['red','blue','green','cyan','magenta'] labels =", "KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) k_means.fit(features) # get cluster ids: self.__labels = k_means.labels_ def predict(self,", "visualize: color = ['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}' for k in range(K)] for k", "spectral clustering estimation: sc = SpectralClustering(n_clusters=K) sc.fit(X) category = sc.predict(X) # visualize: color", "---------- n_clusters: int Number of clusters Attributes ---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self,", "K = 2 X = generate_dataset(visualize=False) # spectral clustering estimation: sc = SpectralClustering(n_clusters=K)", "pairwise_distances(data) # TODO: use better gamma estimation gamma = np.var(A)/4 A = np.exp(-A**2/(2*gamma**2))", "matrix -- kNN for connectivity: A = pairwise_distances(data) # TODO: use better gamma", "< np.partition(eigval, self.__K)[self.__K]) features = np.hstack([eigvec[:, i] for i in idx_k_smallest]) # cluster", "Whether to visualize the generated data \"\"\" from sklearn.datasets import make_moons X, y", "= 2 X = generate_dataset(visualize=False) # spectral clustering estimation: sc = SpectralClustering(n_clusters=K) sc.fit(X)", "= [f'Cluster{k:02d}' for k in range(K)] for k in range(K): plt.scatter(X[category == k][:,0],", "= np.var(A)/4 A = np.exp(-A**2/(2*gamma**2)) # get laplacian matrix: L = csgraph.laplacian(A, normed=True)", "numpy as np from sklearn.cluster import KMeans import matplotlib.pyplot as plt class SpectralClustering(object):", "= KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) k_means.fit(features) # get cluster ids: self.__labels = k_means.labels_ def", "实现 Spectral Clustering 算法 import numpy as np from sklearn.cluster import KMeans import", "N-by-D numpy.ndarray Returns ---------- result: numpy.ndarray data labels as (N, ) numpy.ndarray \"\"\"", 
"numpy.ndarray \"\"\" return np.copy(self.__labels) def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False): \"\"\" Generate dataset for", "\"\"\" Generate dataset for spectral clustering Parameters ---------- visualize: boolean Whether to visualize", "predict(self, data): \"\"\" Get cluster labels Parameters ---------- data: numpy.ndarray Testing set as", "data): \"\"\" Get cluster labels Parameters ---------- data: numpy.ndarray Testing set as N-by-D", "X if __name__ == '__main__': # create dataset: K = 2 X =", "ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='viridis') plt.show() return X if __name__ ==", "numpy.ndarray data labels as (N, ) numpy.ndarray \"\"\" return np.copy(self.__labels) def generate_dataset(N=300, noise=0.07,", "from scipy.sparse import csgraph from scipy.sparse import linalg N, _ = data.shape #", "SpectralClustering Parameters ---------- n_clusters: int Number of clusters Attributes ---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数", "scipy.sparse import linalg N, _ = data.shape # create affinity matrix -- kNN", "Get cluster labels Parameters ---------- data: numpy.ndarray Testing set as N-by-D numpy.ndarray Returns", "sklearn.datasets import make_moons X, y = make_moons(N, noise=noise, random_state=random_state) if visualize: fig, ax", "plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for Spectral Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:, 0], X[:, 1], c=y,", "Attributes ---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2, **kwargs): self.__K = n_clusters self.__labels", "sklearn.neighbors import kneighbors_graph from sklearn.metrics import pairwise_distances from scipy.sparse import csgraph from scipy.sparse", "import pairwise_distances from scipy.sparse import csgraph from scipy.sparse import linalg N, _ =", "k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2, **kwargs): self.__K = n_clusters self.__labels = None def fit(self,", "y = 
make_moons(N, noise=noise, random_state=random_state) if visualize: fig, ax = plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset", "= ['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}' for k in range(K)] for k in range(K):", "for spectral clustering Parameters ---------- visualize: boolean Whether to visualize the generated data", "spectral decomposition: eigval, eigvec = np.linalg.eig(L) # get features: idx_k_smallest = np.where(eigval <", "eigvec = np.linalg.eig(L) # get features: idx_k_smallest = np.where(eigval < np.partition(eigval, self.__K)[self.__K]) features", "kneighbors_graph from sklearn.metrics import pairwise_distances from scipy.sparse import csgraph from scipy.sparse import linalg", "# spectral decomposition: eigval, eigvec = np.linalg.eig(L) # get features: idx_k_smallest = np.where(eigval", "Number of clusters Attributes ---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2, **kwargs): self.__K", "\"\"\" from sklearn.datasets import make_moons X, y = make_moons(N, noise=noise, random_state=random_state) if visualize:", "fontsize=18, fontweight='demi') ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='viridis') plt.show() return X if", "create affinity matrix -- kNN for connectivity: A = pairwise_distances(data) # TODO: use", "centroids Parameters ---------- data: numpy.ndarray Training set as N-by-D numpy.ndarray Returns ---------- None", "Estimate the K centroids Parameters ---------- data: numpy.ndarray Training set as N-by-D numpy.ndarray", "---------- data: numpy.ndarray Testing set as N-by-D numpy.ndarray Returns ---------- result: numpy.ndarray data", "X[:, 1], c=y, s=50, cmap='viridis') plt.show() return X if __name__ == '__main__': #", "labels = [f'Cluster{k:02d}' for k in range(K)] for k in range(K): plt.scatter(X[category ==", "= make_moons(N, noise=noise, random_state=random_state) if visualize: fig, ax = plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for", "TODO: use better 
gamma estimation gamma = np.var(A)/4 A = np.exp(-A**2/(2*gamma**2)) # get", "data: numpy.ndarray Testing set as N-by-D numpy.ndarray Returns ---------- result: numpy.ndarray data labels", "TODO 01: implement SpectralClustering fit from sklearn.neighbors import kneighbors_graph from sklearn.metrics import pairwise_distances", "from scipy.sparse import linalg N, _ = data.shape # create affinity matrix --", "np.exp(-A**2/(2*gamma**2)) # get laplacian matrix: L = csgraph.laplacian(A, normed=True) # spectral decomposition: eigval,", "make_moons X, y = make_moons(N, noise=noise, random_state=random_state) if visualize: fig, ax = plt.subplots(figsize=(16,9))", "# 文件功能: 实现 Spectral Clustering 算法 import numpy as np from sklearn.cluster import", "\"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2, **kwargs): self.__K = n_clusters self.__labels = None", "features = np.hstack([eigvec[:, i] for i in idx_k_smallest]) # cluster using KMeans++ k_means", "idx_k_smallest]) # cluster using KMeans++ k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) k_means.fit(features) # get", "visualize: fig, ax = plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for Spectral Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:,", "generate_dataset(visualize=False) # spectral clustering estimation: sc = SpectralClustering(n_clusters=K) sc.fit(X) category = sc.predict(X) #", "as N-by-D numpy.ndarray Returns ---------- result: numpy.ndarray data labels as (N, ) numpy.ndarray", "in range(K)] for k in range(K): plt.scatter(X[category == k][:,0], X[category == k][:,1], c=color[k],", "i] for i in idx_k_smallest]) # cluster using KMeans++ k_means = KMeans(init='k-means++', n_clusters=self.__K,", "s=50, cmap='viridis') plt.show() return X if __name__ == '__main__': # create dataset: K", "data labels as (N, ) numpy.ndarray \"\"\" return np.copy(self.__labels) def generate_dataset(N=300, noise=0.07, random_state=42,", "as plt class 
SpectralClustering(object): \"\"\" SpectralClustering Parameters ---------- n_clusters: int Number of clusters", "-- kNN for connectivity: A = pairwise_distances(data) # TODO: use better gamma estimation", "matrix: L = csgraph.laplacian(A, normed=True) # spectral decomposition: eigval, eigvec = np.linalg.eig(L) #", "k][:,0], X[category == k][:,1], c=color[k], label=labels[k]) plt.xlabel('X') plt.ylabel('Y') plt.legend() plt.title('Spectral Clustering Testcase') plt.show()", "labels as (N, ) numpy.ndarray \"\"\" return np.copy(self.__labels) def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False):", "# get features: idx_k_smallest = np.where(eigval < np.partition(eigval, self.__K)[self.__K]) features = np.hstack([eigvec[:, i]", "X, y = make_moons(N, noise=noise, random_state=random_state) if visualize: fig, ax = plt.subplots(figsize=(16,9)) ax.set_title('Test", "tol=1e-6) k_means.fit(features) # get cluster ids: self.__labels = k_means.labels_ def predict(self, data): \"\"\"", "normed=True) # spectral decomposition: eigval, eigvec = np.linalg.eig(L) # get features: idx_k_smallest =", "def predict(self, data): \"\"\" Get cluster labels Parameters ---------- data: numpy.ndarray Testing set", "\"\"\" return np.copy(self.__labels) def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False): \"\"\" Generate dataset for spectral", "01: implement SpectralClustering fit from sklearn.neighbors import kneighbors_graph from sklearn.metrics import pairwise_distances from", "def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False): \"\"\" Generate dataset for spectral clustering Parameters ----------", "Generate dataset for spectral clustering Parameters ---------- visualize: boolean Whether to visualize the", "Parameters ---------- n_clusters: int Number of clusters Attributes ---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def", "dataset: K = 2 X = generate_dataset(visualize=False) # spectral clustering estimation: sc 
=", "= np.hstack([eigvec[:, i] for i in idx_k_smallest]) # cluster using KMeans++ k_means =", "using KMeans++ k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) k_means.fit(features) # get cluster ids: self.__labels", "labels Parameters ---------- data: numpy.ndarray Testing set as N-by-D numpy.ndarray Returns ---------- result:", "pairwise_distances from scipy.sparse import csgraph from scipy.sparse import linalg N, _ = data.shape", "# get laplacian matrix: L = csgraph.laplacian(A, normed=True) # spectral decomposition: eigval, eigvec", "from sklearn.cluster import KMeans import matplotlib.pyplot as plt class SpectralClustering(object): \"\"\" SpectralClustering Parameters", "= None def fit(self, data): \"\"\" Estimate the K centroids Parameters ---------- data:", "_ = data.shape # create affinity matrix -- kNN for connectivity: A =", "i in idx_k_smallest]) # cluster using KMeans++ k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) k_means.fit(features)", "numpy.ndarray Returns ---------- None \"\"\" # TODO 01: implement SpectralClustering fit from sklearn.neighbors", "visualize=False): \"\"\" Generate dataset for spectral clustering Parameters ---------- visualize: boolean Whether to", "visualize the generated data \"\"\" from sklearn.datasets import make_moons X, y = make_moons(N,", "import make_moons X, y = make_moons(N, noise=noise, random_state=random_state) if visualize: fig, ax =", "k_means.fit(features) # get cluster ids: self.__labels = k_means.labels_ def predict(self, data): \"\"\" Get", "['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}' for k in range(K)] for k in range(K): plt.scatter(X[category", "data.shape # create affinity matrix -- kNN for connectivity: A = pairwise_distances(data) #", "laplacian matrix: L = csgraph.laplacian(A, normed=True) # spectral decomposition: eigval, eigvec = np.linalg.eig(L)", "# spectral clustering estimation: sc = SpectralClustering(n_clusters=K) sc.fit(X) category = 
sc.predict(X) # visualize:", "None def fit(self, data): \"\"\" Estimate the K centroids Parameters ---------- data: numpy.ndarray", "int Number of clusters Attributes ---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2, **kwargs):", "for k in range(K): plt.scatter(X[category == k][:,0], X[category == k][:,1], c=color[k], label=labels[k]) plt.xlabel('X')", "import numpy as np from sklearn.cluster import KMeans import matplotlib.pyplot as plt class", "gamma estimation gamma = np.var(A)/4 A = np.exp(-A**2/(2*gamma**2)) # get laplacian matrix: L", "csgraph.laplacian(A, normed=True) # spectral decomposition: eigval, eigvec = np.linalg.eig(L) # get features: idx_k_smallest", "k in range(K): plt.scatter(X[category == k][:,0], X[category == k][:,1], c=color[k], label=labels[k]) plt.xlabel('X') plt.ylabel('Y')", "Parameters ---------- data: numpy.ndarray Testing set as N-by-D numpy.ndarray Returns ---------- result: numpy.ndarray", "as (N, ) numpy.ndarray \"\"\" return np.copy(self.__labels) def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False): \"\"\"", "__init__(self, n_clusters=2, **kwargs): self.__K = n_clusters self.__labels = None def fit(self, data): \"\"\"", "n_clusters: int Number of clusters Attributes ---------- \"\"\" # k是分组数;tolerance‘中心点误差’;max_iter是迭代次数 def __init__(self, n_clusters=2,", "use better gamma estimation gamma = np.var(A)/4 A = np.exp(-A**2/(2*gamma**2)) # get laplacian", "decomposition: eigval, eigvec = np.linalg.eig(L) # get features: idx_k_smallest = np.where(eigval < np.partition(eigval,", "# get cluster ids: self.__labels = k_means.labels_ def predict(self, data): \"\"\" Get cluster", "算法 import numpy as np from sklearn.cluster import KMeans import matplotlib.pyplot as plt", "k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) k_means.fit(features) # get cluster ids: self.__labels = k_means.labels_", "SpectralClustering fit from sklearn.neighbors import kneighbors_graph 
from sklearn.metrics import pairwise_distances from scipy.sparse import", "class SpectralClustering(object): \"\"\" SpectralClustering Parameters ---------- n_clusters: int Number of clusters Attributes ----------", "noise=noise, random_state=random_state) if visualize: fig, ax = plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for Spectral Clustering',", "np.linalg.eig(L) # get features: idx_k_smallest = np.where(eigval < np.partition(eigval, self.__K)[self.__K]) features = np.hstack([eigvec[:,", "\"\"\" Estimate the K centroids Parameters ---------- data: numpy.ndarray Training set as N-by-D", "np.where(eigval < np.partition(eigval, self.__K)[self.__K]) features = np.hstack([eigvec[:, i] for i in idx_k_smallest]) #", "to visualize the generated data \"\"\" from sklearn.datasets import make_moons X, y =", "in range(K): plt.scatter(X[category == k][:,0], X[category == k][:,1], c=color[k], label=labels[k]) plt.xlabel('X') plt.ylabel('Y') plt.legend()", "better gamma estimation gamma = np.var(A)/4 A = np.exp(-A**2/(2*gamma**2)) # get laplacian matrix:", "fit from sklearn.neighbors import kneighbors_graph from sklearn.metrics import pairwise_distances from scipy.sparse import csgraph", "sc = SpectralClustering(n_clusters=K) sc.fit(X) category = sc.predict(X) # visualize: color = ['red','blue','green','cyan','magenta'] labels", "= data.shape # create affinity matrix -- kNN for connectivity: A = pairwise_distances(data)", "c=y, s=50, cmap='viridis') plt.show() return X if __name__ == '__main__': # create dataset:", "connectivity: A = pairwise_distances(data) # TODO: use better gamma estimation gamma = np.var(A)/4", "fontweight='demi') ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='viridis') plt.show() return X if __name__", "plt class SpectralClustering(object): \"\"\" SpectralClustering Parameters ---------- n_clusters: int Number of clusters Attributes", "self.__labels = k_means.labels_ def predict(self, data): \"\"\" Get cluster labels Parameters ---------- 
data:", "make_moons(N, noise=noise, random_state=random_state) if visualize: fig, ax = plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for Spectral", "---------- data: numpy.ndarray Training set as N-by-D numpy.ndarray Returns ---------- None \"\"\" #", "ids: self.__labels = k_means.labels_ def predict(self, data): \"\"\" Get cluster labels Parameters ----------", "k_means.labels_ def predict(self, data): \"\"\" Get cluster labels Parameters ---------- data: numpy.ndarray Testing", "sc.fit(X) category = sc.predict(X) # visualize: color = ['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}' for", "return np.copy(self.__labels) def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False): \"\"\" Generate dataset for spectral clustering", "visualize: boolean Whether to visualize the generated data \"\"\" from sklearn.datasets import make_moons", "import kneighbors_graph from sklearn.metrics import pairwise_distances from scipy.sparse import csgraph from scipy.sparse import", "**kwargs): self.__K = n_clusters self.__labels = None def fit(self, data): \"\"\" Estimate the", "[f'Cluster{k:02d}' for k in range(K)] for k in range(K): plt.scatter(X[category == k][:,0], X[category", "n_clusters=self.__K, tol=1e-6) k_means.fit(features) # get cluster ids: self.__labels = k_means.labels_ def predict(self, data):", "Parameters ---------- data: numpy.ndarray Training set as N-by-D numpy.ndarray Returns ---------- None \"\"\"", "# TODO 01: implement SpectralClustering fit from sklearn.neighbors import kneighbors_graph from sklearn.metrics import", "sc.predict(X) # visualize: color = ['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}' for k in range(K)]", ") numpy.ndarray \"\"\" return np.copy(self.__labels) def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False): \"\"\" Generate dataset", "K centroids Parameters ---------- data: numpy.ndarray Training set as N-by-D numpy.ndarray Returns ----------", "as N-by-D 
numpy.ndarray Returns ---------- None \"\"\" # TODO 01: implement SpectralClustering fit", "N, _ = data.shape # create affinity matrix -- kNN for connectivity: A", "boolean Whether to visualize the generated data \"\"\" from sklearn.datasets import make_moons X,", "SpectralClustering(n_clusters=K) sc.fit(X) category = sc.predict(X) # visualize: color = ['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}'", "2 X = generate_dataset(visualize=False) # spectral clustering estimation: sc = SpectralClustering(n_clusters=K) sc.fit(X) category", "= sc.predict(X) # visualize: color = ['red','blue','green','cyan','magenta'] labels = [f'Cluster{k:02d}' for k in", "Spectral Clustering 算法 import numpy as np from sklearn.cluster import KMeans import matplotlib.pyplot", "= k_means.labels_ def predict(self, data): \"\"\" Get cluster labels Parameters ---------- data: numpy.ndarray", "numpy.ndarray Testing set as N-by-D numpy.ndarray Returns ---------- result: numpy.ndarray data labels as", "spectral clustering Parameters ---------- visualize: boolean Whether to visualize the generated data \"\"\"", "generated data \"\"\" from sklearn.datasets import make_moons X, y = make_moons(N, noise=noise, random_state=random_state)", "eigval, eigvec = np.linalg.eig(L) # get features: idx_k_smallest = np.where(eigval < np.partition(eigval, self.__K)[self.__K])", "1], c=y, s=50, cmap='viridis') plt.show() return X if __name__ == '__main__': # create", "Dataset for Spectral Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='viridis')", "= np.exp(-A**2/(2*gamma**2)) # get laplacian matrix: L = csgraph.laplacian(A, normed=True) # spectral decomposition:", "= np.linalg.eig(L) # get features: idx_k_smallest = np.where(eigval < np.partition(eigval, self.__K)[self.__K]) features =", "data): \"\"\" Estimate the K centroids Parameters ---------- data: numpy.ndarray Training set as", "data \"\"\" from sklearn.datasets import make_moons X, y = 
make_moons(N, noise=noise, random_state=random_state) if", "fig, ax = plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for Spectral Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:, 0],", "'__main__': # create dataset: K = 2 X = generate_dataset(visualize=False) # spectral clustering", "range(K)] for k in range(K): plt.scatter(X[category == k][:,0], X[category == k][:,1], c=color[k], label=labels[k])", "linalg N, _ = data.shape # create affinity matrix -- kNN for connectivity:", "X = generate_dataset(visualize=False) # spectral clustering estimation: sc = SpectralClustering(n_clusters=K) sc.fit(X) category =", "== '__main__': # create dataset: K = 2 X = generate_dataset(visualize=False) # spectral", "= n_clusters self.__labels = None def fit(self, data): \"\"\" Estimate the K centroids", "estimation: sc = SpectralClustering(n_clusters=K) sc.fit(X) category = sc.predict(X) # visualize: color = ['red','blue','green','cyan','magenta']", "k in range(K)] for k in range(K): plt.scatter(X[category == k][:,0], X[category == k][:,1],", "np.hstack([eigvec[:, i] for i in idx_k_smallest]) # cluster using KMeans++ k_means = KMeans(init='k-means++',", "0], X[:, 1], c=y, s=50, cmap='viridis') plt.show() return X if __name__ == '__main__':", "\"\"\" Get cluster labels Parameters ---------- data: numpy.ndarray Testing set as N-by-D numpy.ndarray", "== k][:,0], X[category == k][:,1], c=color[k], label=labels[k]) plt.xlabel('X') plt.ylabel('Y') plt.legend() plt.title('Spectral Clustering Testcase')", "np.partition(eigval, self.__K)[self.__K]) features = np.hstack([eigvec[:, i] for i in idx_k_smallest]) # cluster using", "Returns ---------- None \"\"\" # TODO 01: implement SpectralClustering fit from sklearn.neighbors import", "Spectral Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='viridis') plt.show() return", "\"\"\" # TODO 01: implement SpectralClustering fit from sklearn.neighbors import kneighbors_graph from 
sklearn.metrics", "= plt.subplots(figsize=(16,9)) ax.set_title('Test Dataset for Spectral Clustering', fontsize=18, fontweight='demi') ax.scatter(X[:, 0], X[:, 1],", "KMeans++ k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6) k_means.fit(features) # get cluster ids: self.__labels =" ]
[ "already be running in the current thread. \"\"\" try: # We try this", "current thread. \"\"\" try: # We try this first, as in most situations", "annotations import asyncio import typing as t from concurrent.futures import ThreadPoolExecutor def run_sync(coroutine:", "coroutine synchronously - trying to accommodate as many edge cases as possible. 1.", "edge cases as possible. 1. When called within a coroutine. 2. When called", "concurrent.futures import ThreadPoolExecutor def run_sync(coroutine: t.Coroutine): \"\"\" Run the coroutine synchronously - trying", "\"\"\" try: # We try this first, as in most situations this will", "the current thread. \"\"\" try: # We try this first, as in most", "running in the current thread. \"\"\" try: # We try this first, as", "first, as in most situations this will work. return asyncio.run(coroutine) except RuntimeError: #", "When called within a coroutine. 2. When called from ``python -m asyncio``, or", "t from concurrent.futures import ThreadPoolExecutor def run_sync(coroutine: t.Coroutine): \"\"\" Run the coroutine synchronously", "this will work. return asyncio.run(coroutine) except RuntimeError: # An event loop already exists.", "-m asyncio``, or iPython with %autoawait enabled, which means an event loop may", "event loop may already be running in the current thread. \"\"\" try: #", "import asyncio import typing as t from concurrent.futures import ThreadPoolExecutor def run_sync(coroutine: t.Coroutine):", "to accommodate as many edge cases as possible. 1. When called within a", "try: # We try this first, as in most situations this will work.", "import annotations import asyncio import typing as t from concurrent.futures import ThreadPoolExecutor def", "loop already exists. 
with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(asyncio.run, coroutine) return future.result()", "which means an event loop may already be running in the current thread.", "%autoawait enabled, which means an event loop may already be running in the", "asyncio.run(coroutine) except RuntimeError: # An event loop already exists. with ThreadPoolExecutor(max_workers=1) as executor:", "except RuntimeError: # An event loop already exists. with ThreadPoolExecutor(max_workers=1) as executor: future", "in most situations this will work. return asyncio.run(coroutine) except RuntimeError: # An event", "# An event loop already exists. with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(asyncio.run,", "work. return asyncio.run(coroutine) except RuntimeError: # An event loop already exists. with ThreadPoolExecutor(max_workers=1)", "an event loop may already be running in the current thread. \"\"\" try:", "``python -m asyncio``, or iPython with %autoawait enabled, which means an event loop", "import typing as t from concurrent.futures import ThreadPoolExecutor def run_sync(coroutine: t.Coroutine): \"\"\" Run", "cases as possible. 1. When called within a coroutine. 2. When called from", "return asyncio.run(coroutine) except RuntimeError: # An event loop already exists. with ThreadPoolExecutor(max_workers=1) as", "asyncio import typing as t from concurrent.futures import ThreadPoolExecutor def run_sync(coroutine: t.Coroutine): \"\"\"", "in the current thread. \"\"\" try: # We try this first, as in", "<gh_stars>1-10 from __future__ import annotations import asyncio import typing as t from concurrent.futures", "run_sync(coroutine: t.Coroutine): \"\"\" Run the coroutine synchronously - trying to accommodate as many", "2. When called from ``python -m asyncio``, or iPython with %autoawait enabled, which", "accommodate as many edge cases as possible. 1. 
When called within a coroutine.", "from concurrent.futures import ThreadPoolExecutor def run_sync(coroutine: t.Coroutine): \"\"\" Run the coroutine synchronously -", "from __future__ import annotations import asyncio import typing as t from concurrent.futures import", "1. When called within a coroutine. 2. When called from ``python -m asyncio``,", "iPython with %autoawait enabled, which means an event loop may already be running", "as in most situations this will work. return asyncio.run(coroutine) except RuntimeError: # An", "situations this will work. return asyncio.run(coroutine) except RuntimeError: # An event loop already", "loop may already be running in the current thread. \"\"\" try: # We", "typing as t from concurrent.futures import ThreadPoolExecutor def run_sync(coroutine: t.Coroutine): \"\"\" Run the", "asyncio``, or iPython with %autoawait enabled, which means an event loop may already", "def run_sync(coroutine: t.Coroutine): \"\"\" Run the coroutine synchronously - trying to accommodate as", "# We try this first, as in most situations this will work. return", "event loop already exists. with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(asyncio.run, coroutine) return", "enabled, which means an event loop may already be running in the current", "try this first, as in most situations this will work. return asyncio.run(coroutine) except", "or iPython with %autoawait enabled, which means an event loop may already be", "will work. return asyncio.run(coroutine) except RuntimeError: # An event loop already exists. with", "\"\"\" Run the coroutine synchronously - trying to accommodate as many edge cases", "possible. 1. When called within a coroutine. 2. When called from ``python -m", "synchronously - trying to accommodate as many edge cases as possible. 1. When", "means an event loop may already be running in the current thread. \"\"\"", "may already be running in the current thread. 
\"\"\" try: # We try", "import ThreadPoolExecutor def run_sync(coroutine: t.Coroutine): \"\"\" Run the coroutine synchronously - trying to", "this first, as in most situations this will work. return asyncio.run(coroutine) except RuntimeError:", "Run the coroutine synchronously - trying to accommodate as many edge cases as", "thread. \"\"\" try: # We try this first, as in most situations this", "be running in the current thread. \"\"\" try: # We try this first,", "t.Coroutine): \"\"\" Run the coroutine synchronously - trying to accommodate as many edge", "most situations this will work. return asyncio.run(coroutine) except RuntimeError: # An event loop", "- trying to accommodate as many edge cases as possible. 1. When called", "coroutine. 2. When called from ``python -m asyncio``, or iPython with %autoawait enabled,", "RuntimeError: # An event loop already exists. with ThreadPoolExecutor(max_workers=1) as executor: future =", "many edge cases as possible. 1. When called within a coroutine. 2. When", "as possible. 1. When called within a coroutine. 2. When called from ``python", "within a coroutine. 2. When called from ``python -m asyncio``, or iPython with", "__future__ import annotations import asyncio import typing as t from concurrent.futures import ThreadPoolExecutor", "trying to accommodate as many edge cases as possible. 1. When called within", "called within a coroutine. 2. When called from ``python -m asyncio``, or iPython", "as many edge cases as possible. 1. When called within a coroutine. 2.", "An event loop already exists. 
with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit(asyncio.run, coroutine)", "with %autoawait enabled, which means an event loop may already be running in", "called from ``python -m asyncio``, or iPython with %autoawait enabled, which means an", "When called from ``python -m asyncio``, or iPython with %autoawait enabled, which means", "from ``python -m asyncio``, or iPython with %autoawait enabled, which means an event", "We try this first, as in most situations this will work. return asyncio.run(coroutine)", "a coroutine. 2. When called from ``python -m asyncio``, or iPython with %autoawait", "as t from concurrent.futures import ThreadPoolExecutor def run_sync(coroutine: t.Coroutine): \"\"\" Run the coroutine", "the coroutine synchronously - trying to accommodate as many edge cases as possible.", "ThreadPoolExecutor def run_sync(coroutine: t.Coroutine): \"\"\" Run the coroutine synchronously - trying to accommodate" ]
[ "sen=input('enter text: ') def translate(txt): result=[] vowel=['a','e','i','o','u'] for value in txt.lower(): if value", "if value in vowel: result.append(value) else: result.append(value) result.append('o') result.append(value) return result print(\"\".join(translate(sen))) #", "') def translate(txt): result=[] vowel=['a','e','i','o','u'] for value in txt.lower(): if value in vowel:", "for value in txt.lower(): if value in vowel: result.append(value) else: result.append(value) result.append('o') result.append(value)", "<filename>Fun Excercise/rovarskparket.py sen=input('enter text: ') def translate(txt): result=[] vowel=['a','e','i','o','u'] for value in txt.lower():", "Excercise/rovarskparket.py sen=input('enter text: ') def translate(txt): result=[] vowel=['a','e','i','o','u'] for value in txt.lower(): if", "def translate(txt): result=[] vowel=['a','e','i','o','u'] for value in txt.lower(): if value in vowel: result.append(value)", "txt.lower(): if value in vowel: result.append(value) else: result.append(value) result.append('o') result.append(value) return result print(\"\".join(translate(sen)))", "value in txt.lower(): if value in vowel: result.append(value) else: result.append(value) result.append('o') result.append(value) return", "text: ') def translate(txt): result=[] vowel=['a','e','i','o','u'] for value in txt.lower(): if value in", "value in vowel: result.append(value) else: result.append(value) result.append('o') result.append(value) return result print(\"\".join(translate(sen))) # print(translate(sen))", "result=[] vowel=['a','e','i','o','u'] for value in txt.lower(): if value in vowel: result.append(value) else: result.append(value)", "in txt.lower(): if value in vowel: result.append(value) else: result.append(value) result.append('o') result.append(value) return result", "translate(txt): result=[] vowel=['a','e','i','o','u'] for value in txt.lower(): if value in vowel: result.append(value) else:", "vowel=['a','e','i','o','u'] for value in 
txt.lower(): if value in vowel: result.append(value) else: result.append(value) result.append('o')" ]
[ "nothing and advance the position naturally else: self.position += 3 # Code 7", "ones and tens place into an opcode opcode = digits[0] + 10 *", "machine isn't already halted if self.state is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is already halted\")", "the instruction into digits and reverse them digits = list(reversed(self.getDigits(n))) # Zero fill", "self.position += 4 # Code 3 is input elif opcode == 3: #", "IntCodeMachineState.HALTED: break else: continue break outputSignals.append(previousValue) # Result is the highest output signal", "if (opcode == 5 and paramA != 0) or ( opcode == 6", "self.memory = list( map(lambda op: int(op), instructions.strip().split(\",\")) ) # Input value is None", "catch all output signals outputSignals = [] # Iterate through all permutations of", "amplifierSoftware = input_file.read().strip() # List to catch all output signals outputSignals = []", "# If in immediate mode, return the value directly if paramMode == 1:", "amplifiers.append(machine) # Loop through each machine in order until execution halts on the", "r + [n % 10] def splitInstruction(self, n): # Split the instruction into", "code position self.position += 2 # Code 5 and 6 are conditional jumps", "position by 4 self.position += 4 # Code 3 is input elif opcode", "0 while True: for i, machine in enumerate(amplifiers): machine.inputValue = previousValue machine.execute() previousValue", "self.splitInstruction( instruction ) # Code 99 means immediate termination if opcode == 99:", "value at the indicated pointer position outPointer = self.memory[self.position + 1] self.memory[outPointer] =", "2], paramModeB ) # If non-zero, set the position pointer if (opcode ==", "3 is input elif opcode == 3: # If input is not available,", "paramA = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) paramB = self.resolveValue( self.memory,", "{opcode} ({instruction}) at position {self.position}\" ) @click.command() 
@click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file): \"\"\"Put your", "enum.auto() class IntCodeMachine: def __init__(self, instructions): # Machine starts in a clean state", "CLEAN = enum.auto() HALTED = enum.auto() WAITING_FOR_INPUT = enum.auto() class IntCodeMachine: def __init__(self,", "== 0 ): self.position = paramB # Else, do nothing and advance the", "the code position by 4 self.position += 4 # Code 2 is multiplication", "10 * digits[1] # Return the opcode and param modes return (opcode, digits[2],", "= 0 while True: for i, machine in enumerate(amplifiers): machine.inputValue = previousValue machine.execute()", "machine in order until execution halts on the last previousValue = 0 while", "paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) # If", "= paramB # Else, do nothing and advance the position naturally else: self.position", "self.memory, self.memory[self.position + 2], paramModeB ) productPointer = self.memory[self.position + 3] # Perform", "opcode == 99: self.state = IntCodeMachineState.HALTED break # Code 1 is addition elif", "Unknown opcode means there was an error else: raise RuntimeError( f\"Unknown opcode {opcode}", "the ones and tens place into an opcode opcode = digits[0] + 10", "the opcode and param modes return (opcode, digits[2], digits[3], digits[4]) def resolveValue(self, memory,", "3] # Perform the addition self.memory[productPointer] = paramA * paramB # Advance the", "value to create an intcode machine for phase in permutation: machine = IntCodeMachine(amplifierSoftware)", "modes return (opcode, digits[2], digits[3], digits[4]) def resolveValue(self, memory, param, paramMode): # If", "[] # Use the phase value to create an intcode machine for phase", "else: raise RuntimeError( f\"Unknown opcode {opcode} ({instruction}) at position {self.position}\" ) @click.command() @click.argument(\"input_file\",", "read position instruction = self.memory[self.position] # Split 
the opcode and params apart opcode,", "paramB # Else, do nothing and advance the position naturally else: self.position +=", "= enum.auto() class IntCodeMachine: def __init__(self, instructions): # Machine starts in a clean", "already halted if self.state is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is already halted\") # Loop", "execution halts on the last previousValue = 0 while True: for i, machine", "self.position = paramB # Else, do nothing and advance the position naturally else:", "[n % 10] def splitInstruction(self, n): # Split the instruction into digits and", "was an error else: raise RuntimeError( f\"Unknown opcode {opcode} ({instruction}) at position {self.position}\"", "flag = paramA == paramB # Write the value to memory self.memory[outputPointer] =", "on the opcode if opcode == 7: flag = paramA < paramB elif", "# Code 4 is output elif opcode == 4: # Determine the value", "= input_file.read().strip() # List to catch all output signals outputSignals = [] #", "paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) productPointer =", "halts on the last previousValue = 0 while True: for i, machine in", "opcode in [5, 6]: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position", "addition elif opcode == 1: # Get memory values paramA = self.resolveValue( self.memory,", "will start reading the opcodes at position 0 self.position = 0 # Recursive", "through all permutations of phase signals for permutation in itertools.permutations(range(5, 10)): amplifiers =", "and param modes return (opcode, digits[2], digits[3], digits[4]) def resolveValue(self, memory, param, paramMode):", "the phase setting machine.inputValue = phase machine.execute() amplifiers.append(machine) # Loop through each machine", "= self.splitInstruction( instruction ) # Code 99 means immediate termination if opcode ==", "= IntCodeMachineState.CLEAN # Convert the comma-delimited string of numbers into a 
list of", "= self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position", "paramB # Advance the code position by 4 self.position += 4 # Code", "opcode == 6 and paramA == 0 ): self.position = paramB # Else,", "signals outputSignals = [] # Iterate through all permutations of phase signals for", "Advance the code position by 4 self.position += 4 # Code 3 is", "len(digits)): digits.append(0) # Consolidate the ones and tens place into an opcode opcode", "Return the opcode and param modes return (opcode, digits[2], digits[3], digits[4]) def resolveValue(self,", "and paramA == 0 ): self.position = paramB # Else, do nothing and", "# Determine the value based on the opcode if opcode == 7: flag", "n < 10: return [n] else: r = self.getDigits(n // 10) return r", ") outputPointer = self.memory[self.position + 3] # Determine the value based on the", "if opcode == 7: flag = paramA < paramB elif opcode == 8:", "i, machine in enumerate(amplifiers): machine.inputValue = previousValue machine.execute() previousValue = machine.outputValues.pop(0) # When", "Advance the code position by 4 self.position += 4 # Code 2 is", "machine in enumerate(amplifiers): machine.inputValue = previousValue machine.execute() previousValue = machine.outputValues.pop(0) # When the", "self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) outputPointer = self.memory[self.position + 3] #", "self.memory[outputPointer] = int(flag) # Advance the code position by 4 self.position += 4", "input value self.inputValue = None # Advance the code position self.position += 2", "comma-delimited string of numbers into a list of ints self.memory = list( map(lambda", "position {self.position}\" ) @click.command() @click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file): \"\"\"Put your puzzle execution code", "We will start reading the opcodes at position 0 self.position = 0 #", "output signals 
outputSignals = [] # Iterate through all permutations of phase signals", "< 10: return [n] else: r = self.getDigits(n // 10) return r +", "string of numbers into a list of ints self.memory = list( map(lambda op:", "list(reversed(self.getDigits(n))) # Zero fill the digits array for i in range(5 - len(digits)):", "+ paramB # Advance the code position by 4 self.position += 4 #", "on the last previousValue = 0 while True: for i, machine in enumerate(amplifiers):", "it value = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) self.outputValues.append(value) # Advance", "1: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA", "import enum import itertools # vendor imports import click class IntCodeMachineState(enum.Enum): CLEAN =", "at the current read position instruction = self.memory[self.position] # Split the opcode and", "a decimal into digits def getDigits(self, n): if n < 10: return [n]", "== 1: return param # Else, treat it as a pointer else: return", "): self.position = paramB # Else, do nothing and advance the position naturally", "else: r = self.getDigits(n // 10) return r + [n % 10] def", "self.position += 4 # Code 2 is multiplication elif opcode == 2: #", "def resolveValue(self, memory, param, paramMode): # If in immediate mode, return the value", "break # Store the value at the indicated pointer position outPointer = self.memory[self.position", "and tens place into an opcode opcode = digits[0] + 10 * digits[1]", "place into an opcode opcode = digits[0] + 10 * digits[1] # Return", "addition self.memory[sumPointer] = paramA + paramB # Advance the code position by 4", ") paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) # If non-zero,", ") productPointer = self.memory[self.position + 3] # Perform the addition self.memory[productPointer] = paramA", "Write the value to memory self.memory[outputPointer] = int(flag) # Advance the code position", 
"Result is the highest output signal print(\"RESULT:\", max(outputSignals)) # Execute cli function on", "enum import itertools # vendor imports import click class IntCodeMachineState(enum.Enum): CLEAN = enum.auto()", "multiplication elif opcode == 2: # Get memory values paramA = self.resolveValue( self.memory,", "the input value self.inputValue = None # Advance the code position self.position +=", "= self.memory[self.position] # Split the opcode and params apart opcode, paramModeA, paramModeB, paramModeC", "3] # Determine the value based on the opcode if opcode == 7:", "digits.append(0) # Consolidate the ones and tens place into an opcode opcode =", "paramModeB, paramModeC = self.splitInstruction( instruction ) # Code 99 means immediate termination if", "3 # Code 7 and 8 are comparison elif opcode in [7, 8]:", "True: for i, machine in enumerate(amplifiers): machine.inputValue = previousValue machine.execute() previousValue = machine.outputValues.pop(0)", "decimal into digits def getDigits(self, n): if n < 10: return [n] else:", "+ 2], paramModeB ) productPointer = self.memory[self.position + 3] # Perform the addition", "puzzle execution code here\"\"\" # Load the amplifier software instructions amplifierSoftware = input_file.read().strip()", "tens place into an opcode opcode = digits[0] + 10 * digits[1] #", "is None: self.state = IntCodeMachineState.WAITING_FOR_INPUT break # Store the value at the indicated", "# If input is not available, stop execution if self.inputValue is None: self.state", "setting machine.inputValue = phase machine.execute() amplifiers.append(machine) # Loop through each machine in order", "IntCodeMachine: def __init__(self, instructions): # Machine starts in a clean state self.state =", "surethe machine isn't already halted if self.state is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is already", "Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) paramB", "opcode = digits[0] + 
10 * digits[1] # Return the opcode and param", "self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position +", "+= 2 # Code 4 is output elif opcode == 4: # Determine", ") @click.command() @click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file): \"\"\"Put your puzzle execution code here\"\"\" #", "1], paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) #", "# Empty list to capture output values self.outputValues = [] # We will", "self.memory, self.memory[self.position + 1], paramModeA ) self.outputValues.append(value) # Advance the code position self.position", "in order until execution halts on the last previousValue = 0 while True:", "= paramA + paramB # Advance the code position by 4 self.position +=", "None: self.state = IntCodeMachineState.WAITING_FOR_INPUT break # Store the value at the indicated pointer", "= IntCodeMachine(amplifierSoftware) # First execution is for the phase setting machine.inputValue = phase", "paramA != 0) or ( opcode == 6 and paramA == 0 ):", "by 4 self.position += 4 # Code 2 is multiplication elif opcode ==", "and paramA != 0) or ( opcode == 6 and paramA == 0", "opcodes at position 0 self.position = 0 # Recursive function to split a", "is input elif opcode == 3: # If input is not available, stop", "paramMode): # If in immediate mode, return the value directly if paramMode ==", "Make surethe machine isn't already halted if self.state is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is", "phase setting machine.inputValue = phase machine.execute() amplifiers.append(machine) # Loop through each machine in", "7: flag = paramA < paramB elif opcode == 8: flag = paramA", "2 # Code 5 and 6 are conditional jumps elif opcode in [5,", "out the input value self.inputValue = None # Advance the code position self.position", "main(input_file): \"\"\"Put your puzzle execution code here\"\"\" # 
Load the amplifier software instructions", "value to memory self.memory[outputPointer] = int(flag) # Advance the code position by 4", "4 and machine.state is IntCodeMachineState.HALTED: break else: continue break outputSignals.append(previousValue) # Result is", "paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) sumPointer = self.memory[self.position +", ") # If non-zero, set the position pointer if (opcode == 5 and", "if opcode == 99: self.state = IntCodeMachineState.HALTED break # Code 1 is addition", "machine.inputValue = phase machine.execute() amplifiers.append(machine) # Loop through each machine in order until", "paramModeB ) outputPointer = self.memory[self.position + 3] # Determine the value based on", "else: continue break outputSignals.append(previousValue) # Result is the highest output signal print(\"RESULT:\", max(outputSignals))", "or ( opcode == 6 and paramA == 0 ): self.position = paramB", "itertools.permutations(range(5, 10)): amplifiers = [] # Use the phase value to create an", "the opcode and params apart opcode, paramModeA, paramModeB, paramModeC = self.splitInstruction( instruction )", "@click.command() @click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file): \"\"\"Put your puzzle execution code here\"\"\" # Load", "= IntCodeMachineState.HALTED break # Code 1 is addition elif opcode == 1: #", "value is None self.inputValue = None # Empty list to capture output values", "mode, return the value directly if paramMode == 1: return param # Else,", "the position pointer if (opcode == 5 and paramA != 0) or (", "instruction = self.memory[self.position] # Split the opcode and params apart opcode, paramModeA, paramModeB,", "the phase value to create an intcode machine for phase in permutation: machine", "ints self.memory = list( map(lambda op: int(op), instructions.strip().split(\",\")) ) # Input value is", "opcode == 2: # Get memory values paramA = self.resolveValue( self.memory, 
self.memory[self.position +", "self.position += 3 # Code 7 and 8 are comparison elif opcode in", "and reverse them digits = list(reversed(self.getDigits(n))) # Zero fill the digits array for", "self.outputValues = [] # We will start reading the opcodes at position 0", "# Machine starts in a clean state self.state = IntCodeMachineState.CLEAN # Convert the", "10) return r + [n % 10] def splitInstruction(self, n): # Split the", "at position 0 self.position = 0 # Recursive function to split a decimal", "conditional jumps elif opcode in [5, 6]: # Get memory values paramA =", "4 # Code 2 is multiplication elif opcode == 2: # Get memory", "= paramA == paramB # Write the value to memory self.memory[outputPointer] = int(flag)", "self.memory, self.memory[self.position + 2], paramModeB ) sumPointer = self.memory[self.position + 3] # print(\"ADD\",", "a clean state self.state = IntCodeMachineState.CLEAN # Convert the comma-delimited string of numbers", "If in immediate mode, return the value directly if paramMode == 1: return", "advance the position naturally else: self.position += 3 # Code 7 and 8", "self.memory[self.position + 3] # Perform the addition self.memory[productPointer] = paramA * paramB #", "opcode == 3: # If input is not available, stop execution if self.inputValue", "print(\"ADD\", paramA, paramB, \"->\", sumPointer) # Perform the addition self.memory[sumPointer] = paramA +", "in [5, 6]: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position +", "% 10] def splitInstruction(self, n): # Split the instruction into digits and reverse", "+= 3 # Code 7 and 8 are comparison elif opcode in [7,", "paramModeB ) # If non-zero, set the position pointer if (opcode == 5", "position instruction = self.memory[self.position] # Split the opcode and params apart opcode, paramModeA,", "Perform the addition self.memory[productPointer] = paramA * paramB # Advance the code position", "* paramB # Advance the code position by 4 self.position += 4 #", "1], 
paramModeA ) self.outputValues.append(value) # Advance the code position self.position += 2 #", "the addition self.memory[sumPointer] = paramA + paramB # Advance the code position by", "# First execution is for the phase setting machine.inputValue = phase machine.execute() amplifiers.append(machine)", "Loop through each machine in order until execution halts on the last previousValue", "<gh_stars>0 # stdlib imports import enum import itertools # vendor imports import click", "an opcode opcode = digits[0] + 10 * digits[1] # Return the opcode", "# Result is the highest output signal print(\"RESULT:\", max(outputSignals)) # Execute cli function", "[n] else: r = self.getDigits(n // 10) return r + [n % 10]", "into digits and reverse them digits = list(reversed(self.getDigits(n))) # Zero fill the digits", "# Advance the code position self.position += 2 # Code 5 and 6", "the end if i == 4 and machine.state is IntCodeMachineState.HALTED: break else: continue", "+ 3] # Determine the value based on the opcode if opcode ==", "def getDigits(self, n): if n < 10: return [n] else: r = self.getDigits(n", ") # Code 99 means immediate termination if opcode == 99: self.state =", "paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) outputPointer = self.memory[self.position +", "opcode {opcode} ({instruction}) at position {self.position}\" ) @click.command() @click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file): \"\"\"Put", "intcode machine for phase in permutation: machine = IntCodeMachine(amplifierSoftware) # First execution is", "# Code 3 is input elif opcode == 3: # If input is", ") # Input value is None self.inputValue = None # Empty list to", "+= 2 # Code 5 and 6 are conditional jumps elif opcode in", "8 are comparison elif opcode in [7, 8]: # Get memory values paramA", "machine.execute() previousValue = machine.outputValues.pop(0) # When the last amp halts, that's the end", "raise RuntimeError( f\"Unknown opcode {opcode} 
({instruction}) at position {self.position}\" ) @click.command() @click.argument(\"input_file\", type=click.File(\"r\"))", "position by 4 self.position += 4 # Unknown opcode means there was an", "digits and reverse them digits = list(reversed(self.getDigits(n))) # Zero fill the digits array", "Split the instruction into digits and reverse them digits = list(reversed(self.getDigits(n))) # Zero", "the last amp halts, that's the end if i == 4 and machine.state", "== 5 and paramA != 0) or ( opcode == 6 and paramA", "self.memory, self.memory[self.position + 2], paramModeB ) outputPointer = self.memory[self.position + 3] # Determine", "naturally else: self.position += 3 # Code 7 and 8 are comparison elif", "RuntimeError( f\"Unknown opcode {opcode} ({instruction}) at position {self.position}\" ) @click.command() @click.argument(\"input_file\", type=click.File(\"r\")) def", "until execution halts on the last previousValue = 0 while True: for i,", "splitInstruction(self, n): # Split the instruction into digits and reverse them digits =", "(opcode, digits[2], digits[3], digits[4]) def resolveValue(self, memory, param, paramMode): # If in immediate", "opcode, paramModeA, paramModeB, paramModeC = self.splitInstruction( instruction ) # Code 99 means immediate", "Code 1 is addition elif opcode == 1: # Get memory values paramA", "# Recursive function to split a decimal into digits def getDigits(self, n): if", "None self.inputValue = None # Empty list to capture output values self.outputValues =", "addition self.memory[productPointer] = paramA * paramB # Advance the code position by 4", "phase in permutation: machine = IntCodeMachine(amplifierSoftware) # First execution is for the phase", "10] def splitInstruction(self, n): # Split the instruction into digits and reverse them", "IntCodeMachineState.HALTED break # Code 1 is addition elif opcode == 1: # Get", "3] # print(\"ADD\", paramA, paramB, \"->\", sumPointer) # Perform the addition self.memory[sumPointer] =", "opcode in [7, 
8]: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position", "+ 10 * digits[1] # Return the opcode and param modes return (opcode,", "position self.position += 2 # Code 5 and 6 are conditional jumps elif", "Load the amplifier software instructions amplifierSoftware = input_file.read().strip() # List to catch all", "Zero fill the digits array for i in range(5 - len(digits)): digits.append(0) #", "2 # Code 4 is output elif opcode == 4: # Determine the", "None # Empty list to capture output values self.outputValues = [] # We", "r = self.getDigits(n // 10) return r + [n % 10] def splitInstruction(self,", "- len(digits)): digits.append(0) # Consolidate the ones and tens place into an opcode", "break # Code 1 is addition elif opcode == 1: # Get memory", "do nothing and advance the position naturally else: self.position += 3 # Code", "self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) self.outputValues.append(value) # Advance the code position", "# Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA )", "# Load the amplifier software instructions amplifierSoftware = input_file.read().strip() # List to catch", "values paramA = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) paramB = self.resolveValue(", "# Consolidate the ones and tens place into an opcode opcode = digits[0]", "outPointer = self.memory[self.position + 1] self.memory[outPointer] = self.inputValue # Zero out the input", "output signal print(\"RESULT:\", max(outputSignals)) # Execute cli function on main if __name__ ==", "== 6 and paramA == 0 ): self.position = paramB # Else, do", "flag = paramA < paramB elif opcode == 8: flag = paramA ==", "immediate termination if opcode == 99: self.state = IntCodeMachineState.HALTED break # Code 1", "break outputSignals.append(previousValue) # Result is the highest output signal print(\"RESULT:\", max(outputSignals)) # Execute", "output 
elif opcode == 4: # Determine the value and print it value", "and machine.state is IntCodeMachineState.HALTED: break else: continue break outputSignals.append(previousValue) # Result is the", "4 is output elif opcode == 4: # Determine the value and print", ") paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) outputPointer = self.memory[self.position", "machine.state is IntCodeMachineState.HALTED: break else: continue break outputSignals.append(previousValue) # Result is the highest", "self.memory, self.memory[self.position + 1], paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2],", "a pointer else: return memory[param] def execute(self): # Make surethe machine isn't already", "execution is for the phase setting machine.inputValue = phase machine.execute() amplifiers.append(machine) # Loop", "while True: # Get the code at the current read position instruction =", "# If non-zero, set the position pointer if (opcode == 5 and paramA", "means immediate termination if opcode == 99: self.state = IntCodeMachineState.HALTED break # Code", "paramA + paramB # Advance the code position by 4 self.position += 4", "# When the last amp halts, that's the end if i == 4", "input_file.read().strip() # List to catch all output signals outputSignals = [] # Iterate", "self.memory[self.position + 1], paramModeA ) self.outputValues.append(value) # Advance the code position self.position +=", "Store the value at the indicated pointer position outPointer = self.memory[self.position + 1]", "digits def getDigits(self, n): if n < 10: return [n] else: r =", "in immediate mode, return the value directly if paramMode == 1: return param", "print(\"RESULT:\", max(outputSignals)) # Execute cli function on main if __name__ == \"__main__\": main()", "# Perform the addition self.memory[sumPointer] = paramA + paramB # Advance the code", "paramB elif opcode == 8: flag = paramA == paramB # Write the", "opcode and param modes return 
(opcode, digits[2], digits[3], digits[4]) def resolveValue(self, memory, param,", "Code 5 and 6 are conditional jumps elif opcode in [5, 6]: #", "enumerate(amplifiers): machine.inputValue = previousValue machine.execute() previousValue = machine.outputValues.pop(0) # When the last amp", "enum.auto() WAITING_FOR_INPUT = enum.auto() class IntCodeMachine: def __init__(self, instructions): # Machine starts in", "code position by 4 self.position += 4 # Code 3 is input elif", "for the phase setting machine.inputValue = phase machine.execute() amplifiers.append(machine) # Loop through each", "self.getDigits(n // 10) return r + [n % 10] def splitInstruction(self, n): #", "Code 7 and 8 are comparison elif opcode in [7, 8]: # Get", "means there was an error else: raise RuntimeError( f\"Unknown opcode {opcode} ({instruction}) at", "Determine the value and print it value = self.resolveValue( self.memory, self.memory[self.position + 1],", "all permutations of phase signals for permutation in itertools.permutations(range(5, 10)): amplifiers = []", "# Code 7 and 8 are comparison elif opcode in [7, 8]: #", "the code at the current read position instruction = self.memory[self.position] # Split the", "def __init__(self, instructions): # Machine starts in a clean state self.state = IntCodeMachineState.CLEAN", "Convert the comma-delimited string of numbers into a list of ints self.memory =", "is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is already halted\") # Loop infinitely until we reach", "paramModeB ) sumPointer = self.memory[self.position + 3] # print(\"ADD\", paramA, paramB, \"->\", sumPointer)", "indicated pointer position outPointer = self.memory[self.position + 1] self.memory[outPointer] = self.inputValue # Zero", "elif opcode == 8: flag = paramA == paramB # Write the value", "== 4 and machine.state is IntCodeMachineState.HALTED: break else: continue break outputSignals.append(previousValue) # Result", "# Unknown opcode means there was an error else: raise 
RuntimeError( f\"Unknown opcode", "# Loop infinitely until we reach the termination instruction while True: # Get", "imports import click class IntCodeMachineState(enum.Enum): CLEAN = enum.auto() HALTED = enum.auto() WAITING_FOR_INPUT =", "we reach the termination instruction while True: # Get the code at the", "is addition elif opcode == 1: # Get memory values paramA = self.resolveValue(", "output values self.outputValues = [] # We will start reading the opcodes at", "int(op), instructions.strip().split(\",\")) ) # Input value is None self.inputValue = None # Empty", "n): # Split the instruction into digits and reverse them digits = list(reversed(self.getDigits(n)))", ") self.outputValues.append(value) # Advance the code position self.position += 2 # Code 5", "paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) outputPointer =", "# Else, treat it as a pointer else: return memory[param] def execute(self): #", "self.memory[self.position + 3] # Determine the value based on the opcode if opcode", "2: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA", "code position by 4 self.position += 4 # Unknown opcode means there was", "and 8 are comparison elif opcode in [7, 8]: # Get memory values", "of numbers into a list of ints self.memory = list( map(lambda op: int(op),", "current read position instruction = self.memory[self.position] # Split the opcode and params apart", "termination if opcode == 99: self.state = IntCodeMachineState.HALTED break # Code 1 is", "1 is addition elif opcode == 1: # Get memory values paramA =", "input elif opcode == 3: # If input is not available, stop execution", "[] # Iterate through all permutations of phase signals for permutation in itertools.permutations(range(5,", "permutation: machine = IntCodeMachine(amplifierSoftware) # First execution is for the phase setting machine.inputValue", "itertools # vendor imports import click class 
IntCodeMachineState(enum.Enum): CLEAN = enum.auto() HALTED =", "5 and 6 are conditional jumps elif opcode in [5, 6]: # Get", "= enum.auto() HALTED = enum.auto() WAITING_FOR_INPUT = enum.auto() class IntCodeMachine: def __init__(self, instructions):", "previousValue = machine.outputValues.pop(0) # When the last amp halts, that's the end if", "IntCodeMachineState.CLEAN # Convert the comma-delimited string of numbers into a list of ints", "# Get the code at the current read position instruction = self.memory[self.position] #", "position self.position += 2 # Code 4 is output elif opcode == 4:", "4 self.position += 4 # Code 2 is multiplication elif opcode == 2:", "# Advance the code position by 4 self.position += 4 # Code 3", "if self.state is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is already halted\") # Loop infinitely until", "each machine in order until execution halts on the last previousValue = 0", "= machine.outputValues.pop(0) # When the last amp halts, that's the end if i", "input is not available, stop execution if self.inputValue is None: self.state = IntCodeMachineState.WAITING_FOR_INPUT", "[7, 8]: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1],", "= self.memory[self.position + 3] # Determine the value based on the opcode if", "opcode if opcode == 7: flag = paramA < paramB elif opcode ==", "amp halts, that's the end if i == 4 and machine.state is IntCodeMachineState.HALTED:", "instruction while True: # Get the code at the current read position instruction", "self.state = IntCodeMachineState.WAITING_FOR_INPUT break # Store the value at the indicated pointer position", "to create an intcode machine for phase in permutation: machine = IntCodeMachine(amplifierSoftware) #", "Consolidate the ones and tens place into an opcode opcode = digits[0] +", "class IntCodeMachineState(enum.Enum): CLEAN = enum.auto() HALTED = enum.auto() WAITING_FOR_INPUT = enum.auto() class IntCodeMachine:", "+ 2], paramModeB ) # 
If non-zero, set the position pointer if (opcode", "paramModeC = self.splitInstruction( instruction ) # Code 99 means immediate termination if opcode", "for permutation in itertools.permutations(range(5, 10)): amplifiers = [] # Use the phase value", "continue break outputSignals.append(previousValue) # Result is the highest output signal print(\"RESULT:\", max(outputSignals)) #", "opcode == 8: flag = paramA == paramB # Write the value to", "is for the phase setting machine.inputValue = phase machine.execute() amplifiers.append(machine) # Loop through", "Recursive function to split a decimal into digits def getDigits(self, n): if n", "Else, treat it as a pointer else: return memory[param] def execute(self): # Make", "Perform the addition self.memory[sumPointer] = paramA + paramB # Advance the code position", "instruction ) # Code 99 means immediate termination if opcode == 99: self.state", "enum.auto() HALTED = enum.auto() WAITING_FOR_INPUT = enum.auto() class IntCodeMachine: def __init__(self, instructions): #", "# Code 99 means immediate termination if opcode == 99: self.state = IntCodeMachineState.HALTED", "infinitely until we reach the termination instruction while True: # Get the code", "the value to memory self.memory[outputPointer] = int(flag) # Advance the code position by", "First execution is for the phase setting machine.inputValue = phase machine.execute() amplifiers.append(machine) #", "self.inputValue = None # Advance the code position self.position += 2 # Code", "are comparison elif opcode in [7, 8]: # Get memory values paramA =", "as a pointer else: return memory[param] def execute(self): # Make surethe machine isn't", "based on the opcode if opcode == 7: flag = paramA < paramB", "= self.inputValue # Zero out the input value self.inputValue = None # Advance", "self.position = 0 # Recursive function to split a decimal into digits def", "elif opcode in [5, 6]: # Get memory values paramA = self.resolveValue( self.memory,", "6 are conditional jumps elif 
opcode in [5, 6]: # Get memory values", "= digits[0] + 10 * digits[1] # Return the opcode and param modes", "+= 4 # Code 3 is input elif opcode == 3: # If", "state self.state = IntCodeMachineState.CLEAN # Convert the comma-delimited string of numbers into a", "vendor imports import click class IntCodeMachineState(enum.Enum): CLEAN = enum.auto() HALTED = enum.auto() WAITING_FOR_INPUT", "= IntCodeMachineState.WAITING_FOR_INPUT break # Store the value at the indicated pointer position outPointer", "into an opcode opcode = digits[0] + 10 * digits[1] # Return the", "!= 0) or ( opcode == 6 and paramA == 0 ): self.position", "reach the termination instruction while True: # Get the code at the current", "4 # Unknown opcode means there was an error else: raise RuntimeError( f\"Unknown", "1], paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) outputPointer", "= enum.auto() WAITING_FOR_INPUT = enum.auto() class IntCodeMachine: def __init__(self, instructions): # Machine starts", "execution if self.inputValue is None: self.state = IntCodeMachineState.WAITING_FOR_INPUT break # Store the value", "position pointer if (opcode == 5 and paramA != 0) or ( opcode", "( opcode == 6 and paramA == 0 ): self.position = paramB #", "previousValue machine.execute() previousValue = machine.outputValues.pop(0) # When the last amp halts, that's the", "outputSignals.append(previousValue) # Result is the highest output signal print(\"RESULT:\", max(outputSignals)) # Execute cli", "the highest output signal print(\"RESULT:\", max(outputSignals)) # Execute cli function on main if", "[] # We will start reading the opcodes at position 0 self.position =", "self.position += 2 # Code 4 is output elif opcode == 4: #", "outputSignals = [] # Iterate through all permutations of phase signals for permutation", "is the highest output signal print(\"RESULT:\", max(outputSignals)) # Execute cli function on main", "and advance the position naturally else: 
self.position += 3 # Code 7 and", "# Code 2 is multiplication elif opcode == 2: # Get memory values", "1] self.memory[outPointer] = self.inputValue # Zero out the input value self.inputValue = None", "position by 4 self.position += 4 # Code 2 is multiplication elif opcode", "the code position by 4 self.position += 4 # Code 3 is input", "execution code here\"\"\" # Load the amplifier software instructions amplifierSoftware = input_file.read().strip() #", "< paramB elif opcode == 8: flag = paramA == paramB # Write", "is IntCodeMachineState.HALTED: break else: continue break outputSignals.append(previousValue) # Result is the highest output", "set the position pointer if (opcode == 5 and paramA != 0) or", "4: # Determine the value and print it value = self.resolveValue( self.memory, self.memory[self.position", "for phase in permutation: machine = IntCodeMachine(amplifierSoftware) # First execution is for the", "+ 2], paramModeB ) sumPointer = self.memory[self.position + 3] # print(\"ADD\", paramA, paramB,", "productPointer = self.memory[self.position + 3] # Perform the addition self.memory[productPointer] = paramA *", "until we reach the termination instruction while True: # Get the code at", "6 and paramA == 0 ): self.position = paramB # Else, do nothing", "= [] # Iterate through all permutations of phase signals for permutation in", "IntCodeMachine(amplifierSoftware) # First execution is for the phase setting machine.inputValue = phase machine.execute()", "params apart opcode, paramModeA, paramModeB, paramModeC = self.splitInstruction( instruction ) # Code 99", "phase machine.execute() amplifiers.append(machine) # Loop through each machine in order until execution halts", "f\"Unknown opcode {opcode} ({instruction}) at position {self.position}\" ) @click.command() @click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file):", "order until execution halts on the last previousValue = 0 while True: for", "comparison elif opcode in [7, 8]: # Get memory 
values paramA = self.resolveValue(", "there was an error else: raise RuntimeError( f\"Unknown opcode {opcode} ({instruction}) at position", "execute(self): # Make surethe machine isn't already halted if self.state is IntCodeMachineState.HALTED: raise", "4 self.position += 4 # Unknown opcode means there was an error else:", "IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is already halted\") # Loop infinitely until we reach the", "# Code 5 and 6 are conditional jumps elif opcode in [5, 6]:", "value = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) self.outputValues.append(value) # Advance the", "99: self.state = IntCodeMachineState.HALTED break # Code 1 is addition elif opcode ==", "= self.getDigits(n // 10) return r + [n % 10] def splitInstruction(self, n):", "the code position self.position += 2 # Code 4 is output elif opcode", "self.memory[self.position] # Split the opcode and params apart opcode, paramModeA, paramModeB, paramModeC =", "8]: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA", "# Advance the code position self.position += 2 # Code 4 is output", "({instruction}) at position {self.position}\" ) @click.command() @click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file): \"\"\"Put your puzzle", "here\"\"\" # Load the amplifier software instructions amplifierSoftware = input_file.read().strip() # List to", "the opcodes at position 0 self.position = 0 # Recursive function to split", "# Zero out the input value self.inputValue = None # Advance the code", "already halted\") # Loop infinitely until we reach the termination instruction while True:", "1: return param # Else, treat it as a pointer else: return memory[param]", "map(lambda op: int(op), instructions.strip().split(\",\")) ) # Input value is None self.inputValue = None", "[5, 6]: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1],", 
"machine.outputValues.pop(0) # When the last amp halts, that's the end if i ==", "raise RuntimeError(\"Machine is already halted\") # Loop infinitely until we reach the termination", "return (opcode, digits[2], digits[3], digits[4]) def resolveValue(self, memory, param, paramMode): # If in", "1], paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) productPointer", "= self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) # If non-zero, set the", "pointer if (opcode == 5 and paramA != 0) or ( opcode ==", "= int(flag) # Advance the code position by 4 self.position += 4 #", "jumps elif opcode in [5, 6]: # Get memory values paramA = self.resolveValue(", "self.memory, self.memory[self.position + 2], paramModeB ) # If non-zero, set the position pointer", "outputPointer = self.memory[self.position + 3] # Determine the value based on the opcode", "param # Else, treat it as a pointer else: return memory[param] def execute(self):", "the code position by 4 self.position += 4 # Unknown opcode means there", "= 0 # Recursive function to split a decimal into digits def getDigits(self,", "if i == 4 and machine.state is IntCodeMachineState.HALTED: break else: continue break outputSignals.append(previousValue)", "last amp halts, that's the end if i == 4 and machine.state is", "and print it value = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) self.outputValues.append(value)", "create an intcode machine for phase in permutation: machine = IntCodeMachine(amplifierSoftware) # First", "# Code 1 is addition elif opcode == 1: # Get memory values", "self.state is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is already halted\") # Loop infinitely until we", "\"->\", sumPointer) # Perform the addition self.memory[sumPointer] = paramA + paramB # Advance", "available, stop execution if self.inputValue is None: self.state = IntCodeMachineState.WAITING_FOR_INPUT break # Store", "Loop 
infinitely until we reach the termination instruction while True: # Get the", "Empty list to capture output values self.outputValues = [] # We will start", "position outPointer = self.memory[self.position + 1] self.memory[outPointer] = self.inputValue # Zero out the", "Code 3 is input elif opcode == 3: # If input is not", "self.memory[outPointer] = self.inputValue # Zero out the input value self.inputValue = None #", "paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) # If non-zero, set", "digits[4]) def resolveValue(self, memory, param, paramMode): # If in immediate mode, return the", "if n < 10: return [n] else: r = self.getDigits(n // 10) return", "pointer else: return memory[param] def execute(self): # Make surethe machine isn't already halted", "= self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) outputPointer = self.memory[self.position + 3]", "halts, that's the end if i == 4 and machine.state is IntCodeMachineState.HALTED: break", "import itertools # vendor imports import click class IntCodeMachineState(enum.Enum): CLEAN = enum.auto() HALTED", "paramA, paramB, \"->\", sumPointer) # Perform the addition self.memory[sumPointer] = paramA + paramB", "click class IntCodeMachineState(enum.Enum): CLEAN = enum.auto() HALTED = enum.auto() WAITING_FOR_INPUT = enum.auto() class", "range(5 - len(digits)): digits.append(0) # Consolidate the ones and tens place into an", "while True: for i, machine in enumerate(amplifiers): machine.inputValue = previousValue machine.execute() previousValue =", "99 means immediate termination if opcode == 99: self.state = IntCodeMachineState.HALTED break #", "Get the code at the current read position instruction = self.memory[self.position] # Split", "is multiplication elif opcode == 2: # Get memory values paramA = self.resolveValue(", "the indicated pointer position outPointer = self.memory[self.position + 1] self.memory[outPointer] = self.inputValue #", "# Advance the code 
position by 4 self.position += 4 # Unknown opcode", "your puzzle execution code here\"\"\" # Load the amplifier software instructions amplifierSoftware =", "= previousValue machine.execute() previousValue = machine.outputValues.pop(0) # When the last amp halts, that's", "True: # Get the code at the current read position instruction = self.memory[self.position]", "start reading the opcodes at position 0 self.position = 0 # Recursive function", "paramA == 0 ): self.position = paramB # Else, do nothing and advance", "values self.outputValues = [] # We will start reading the opcodes at position", "self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) # If non-zero, set the position", "amplifiers = [] # Use the phase value to create an intcode machine", "def main(input_file): \"\"\"Put your puzzle execution code here\"\"\" # Load the amplifier software", "Use the phase value to create an intcode machine for phase in permutation:", "None # Advance the code position self.position += 2 # Code 4 is", "5 and paramA != 0) or ( opcode == 6 and paramA ==", "is None self.inputValue = None # Empty list to capture output values self.outputValues", "int(flag) # Advance the code position by 4 self.position += 4 # Unknown", "self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) productPointer = self.memory[self.position + 3] #", "RuntimeError(\"Machine is already halted\") # Loop infinitely until we reach the termination instruction", "= paramA * paramB # Advance the code position by 4 self.position +=", "# Use the phase value to create an intcode machine for phase in", "{self.position}\" ) @click.command() @click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file): \"\"\"Put your puzzle execution code here\"\"\"", "code position by 4 self.position += 4 # Code 2 is multiplication elif", "self.memory[self.position + 1] self.memory[outPointer] = self.inputValue # Zero out the input value self.inputValue", "to catch all 
output signals outputSignals = [] # Iterate through all permutations", "# Split the opcode and params apart opcode, paramModeA, paramModeB, paramModeC = self.splitInstruction(", "into a list of ints self.memory = list( map(lambda op: int(op), instructions.strip().split(\",\")) )", "self.outputValues.append(value) # Advance the code position self.position += 2 # Code 5 and", "# Convert the comma-delimited string of numbers into a list of ints self.memory", "end if i == 4 and machine.state is IntCodeMachineState.HALTED: break else: continue break", "the code position self.position += 2 # Code 5 and 6 are conditional", "paramModeA, paramModeB, paramModeC = self.splitInstruction( instruction ) # Code 99 means immediate termination", "imports import enum import itertools # vendor imports import click class IntCodeMachineState(enum.Enum): CLEAN", "2], paramModeB ) productPointer = self.memory[self.position + 3] # Perform the addition self.memory[productPointer]", "else: return memory[param] def execute(self): # Make surethe machine isn't already halted if", "the value and print it value = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA", "the value directly if paramMode == 1: return param # Else, treat it", "and params apart opcode, paramModeA, paramModeB, paramModeC = self.splitInstruction( instruction ) # Code", "split a decimal into digits def getDigits(self, n): if n < 10: return", "machine.inputValue = previousValue machine.execute() previousValue = machine.outputValues.pop(0) # When the last amp halts,", "the termination instruction while True: # Get the code at the current read", "memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) paramB =", "halted if self.state is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is already halted\") # Loop infinitely", "opcode and params apart opcode, paramModeA, paramModeB, paramModeC = self.splitInstruction( instruction ) #", "the opcode if 
opcode == 7: flag = paramA < paramB elif opcode", "def execute(self): # Make surethe machine isn't already halted if self.state is IntCodeMachineState.HALTED:", "# Input value is None self.inputValue = None # Empty list to capture", "the value based on the opcode if opcode == 7: flag = paramA", "list of ints self.memory = list( map(lambda op: int(op), instructions.strip().split(\",\")) ) # Input", "the value at the indicated pointer position outPointer = self.memory[self.position + 1] self.memory[outPointer]", "2], paramModeB ) sumPointer = self.memory[self.position + 3] # print(\"ADD\", paramA, paramB, \"->\",", "numbers into a list of ints self.memory = list( map(lambda op: int(op), instructions.strip().split(\",\"))", "print it value = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) self.outputValues.append(value) #", "signals for permutation in itertools.permutations(range(5, 10)): amplifiers = [] # Use the phase", "= None # Advance the code position self.position += 2 # Code 4", "fill the digits array for i in range(5 - len(digits)): digits.append(0) # Consolidate", "to memory self.memory[outputPointer] = int(flag) # Advance the code position by 4 self.position", "@click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file): \"\"\"Put your puzzle execution code here\"\"\" # Load the", "2 is multiplication elif opcode == 2: # Get memory values paramA =", "position 0 self.position = 0 # Recursive function to split a decimal into", "* digits[1] # Return the opcode and param modes return (opcode, digits[2], digits[3],", "self.state = IntCodeMachineState.CLEAN # Convert the comma-delimited string of numbers into a list", "Split the opcode and params apart opcode, paramModeA, paramModeB, paramModeC = self.splitInstruction( instruction", "= self.memory[self.position + 3] # Perform the addition self.memory[productPointer] = paramA * paramB", "class IntCodeMachine: def __init__(self, instructions): # Machine starts in a 
clean state self.state", "IntCodeMachineState.WAITING_FOR_INPUT break # Store the value at the indicated pointer position outPointer =", "+ 2], paramModeB ) outputPointer = self.memory[self.position + 3] # Determine the value", "memory self.memory[outputPointer] = int(flag) # Advance the code position by 4 self.position +=", "+= 4 # Unknown opcode means there was an error else: raise RuntimeError(", "+ 3] # print(\"ADD\", paramA, paramB, \"->\", sumPointer) # Perform the addition self.memory[sumPointer]", "// 10) return r + [n % 10] def splitInstruction(self, n): # Split", ") paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) sumPointer = self.memory[self.position", "return param # Else, treat it as a pointer else: return memory[param] def", "= self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA ) self.outputValues.append(value) # Advance the code", "param modes return (opcode, digits[2], digits[3], digits[4]) def resolveValue(self, memory, param, paramMode): #", "# print(\"ADD\", paramA, paramB, \"->\", sumPointer) # Perform the addition self.memory[sumPointer] = paramA", "7 and 8 are comparison elif opcode in [7, 8]: # Get memory", "0 # Recursive function to split a decimal into digits def getDigits(self, n):", "all output signals outputSignals = [] # Iterate through all permutations of phase", ") paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) productPointer = self.memory[self.position", "digits array for i in range(5 - len(digits)): digits.append(0) # Consolidate the ones", "digits[1] # Return the opcode and param modes return (opcode, digits[2], digits[3], digits[4])", "for i in range(5 - len(digits)): digits.append(0) # Consolidate the ones and tens", "opcode == 4: # Determine the value and print it value = self.resolveValue(", "Determine the value based on the opcode if opcode == 7: flag =", "List to catch all output signals outputSignals = [] # Iterate through 
all", "opcode opcode = digits[0] + 10 * digits[1] # Return the opcode and", "self.memory[self.position + 2], paramModeB ) productPointer = self.memory[self.position + 3] # Perform the", "elif opcode == 4: # Determine the value and print it value =", "amplifier software instructions amplifierSoftware = input_file.read().strip() # List to catch all output signals", "into digits def getDigits(self, n): if n < 10: return [n] else: r", "the addition self.memory[productPointer] = paramA * paramB # Advance the code position by", "# Perform the addition self.memory[productPointer] = paramA * paramB # Advance the code", "immediate mode, return the value directly if paramMode == 1: return param #", "signal print(\"RESULT:\", max(outputSignals)) # Execute cli function on main if __name__ == \"__main__\":", "4 self.position += 4 # Code 3 is input elif opcode == 3:", "self.state = IntCodeMachineState.HALTED break # Code 1 is addition elif opcode == 1:", "= [] # Use the phase value to create an intcode machine for", "== 1: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1],", "by 4 self.position += 4 # Code 3 is input elif opcode ==", "n): if n < 10: return [n] else: r = self.getDigits(n // 10)", "code position self.position += 2 # Code 4 is output elif opcode ==", "if paramMode == 1: return param # Else, treat it as a pointer", "= [] # We will start reading the opcodes at position 0 self.position", "the current read position instruction = self.memory[self.position] # Split the opcode and params", "stdlib imports import enum import itertools # vendor imports import click class IntCodeMachineState(enum.Enum):", "paramA < paramB elif opcode == 8: flag = paramA == paramB #", "at the indicated pointer position outPointer = self.memory[self.position + 1] self.memory[outPointer] = self.inputValue", "at position {self.position}\" ) @click.command() @click.argument(\"input_file\", type=click.File(\"r\")) def main(input_file): \"\"\"Put your 
puzzle execution", "== paramB # Write the value to memory self.memory[outputPointer] = int(flag) # Advance", "to capture output values self.outputValues = [] # We will start reading the", "= self.memory[self.position + 3] # print(\"ADD\", paramA, paramB, \"->\", sumPointer) # Perform the", "function to split a decimal into digits def getDigits(self, n): if n <", "apart opcode, paramModeA, paramModeB, paramModeC = self.splitInstruction( instruction ) # Code 99 means", "If non-zero, set the position pointer if (opcode == 5 and paramA !=", "1], paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) sumPointer", "non-zero, set the position pointer if (opcode == 5 and paramA != 0)", "in range(5 - len(digits)): digits.append(0) # Consolidate the ones and tens place into", "opcode means there was an error else: raise RuntimeError( f\"Unknown opcode {opcode} ({instruction})", "reading the opcodes at position 0 self.position = 0 # Recursive function to", "+ 1] self.memory[outPointer] = self.inputValue # Zero out the input value self.inputValue =", "elif opcode == 1: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position", "i == 4 and machine.state is IntCodeMachineState.HALTED: break else: continue break outputSignals.append(previousValue) #", "break else: continue break outputSignals.append(previousValue) # Result is the highest output signal print(\"RESULT:\",", "If input is not available, stop execution if self.inputValue is None: self.state =", "last previousValue = 0 while True: for i, machine in enumerate(amplifiers): machine.inputValue =", "self.memory[self.position + 2], paramModeB ) outputPointer = self.memory[self.position + 3] # Determine the", "paramModeA ) self.outputValues.append(value) # Advance the code position self.position += 2 # Code", "if self.inputValue is None: self.state = IntCodeMachineState.WAITING_FOR_INPUT break # Store the value at", "value self.inputValue = None # 
Advance the code position self.position += 2 #", "+= 4 # Code 2 is multiplication elif opcode == 2: # Get", "# Loop through each machine in order until execution halts on the last", "op: int(op), instructions.strip().split(\",\")) ) # Input value is None self.inputValue = None #", "# Else, do nothing and advance the position naturally else: self.position += 3", "value directly if paramMode == 1: return param # Else, treat it as", "directly if paramMode == 1: return param # Else, treat it as a", "Advance the code position self.position += 2 # Code 4 is output elif", "sumPointer = self.memory[self.position + 3] # print(\"ADD\", paramA, paramB, \"->\", sumPointer) # Perform", "self.position += 2 # Code 5 and 6 are conditional jumps elif opcode", "are conditional jumps elif opcode in [5, 6]: # Get memory values paramA", "# Determine the value and print it value = self.resolveValue( self.memory, self.memory[self.position +", "in a clean state self.state = IntCodeMachineState.CLEAN # Convert the comma-delimited string of", "paramModeB ) productPointer = self.memory[self.position + 3] # Perform the addition self.memory[productPointer] =", "return r + [n % 10] def splitInstruction(self, n): # Split the instruction", "self.memory[productPointer] = paramA * paramB # Advance the code position by 4 self.position", "Code 2 is multiplication elif opcode == 2: # Get memory values paramA", "3: # If input is not available, stop execution if self.inputValue is None:", "Code 4 is output elif opcode == 4: # Determine the value and", "instructions amplifierSoftware = input_file.read().strip() # List to catch all output signals outputSignals =", "machine.execute() amplifiers.append(machine) # Loop through each machine in order until execution halts on", "2], paramModeB ) outputPointer = self.memory[self.position + 3] # Determine the value based", "paramB # Write the value to memory self.memory[outputPointer] = int(flag) # Advance the", "Zero out the input value self.inputValue = None # 
Advance the code position", "0) or ( opcode == 6 and paramA == 0 ): self.position =", "the last previousValue = 0 while True: for i, machine in enumerate(amplifiers): machine.inputValue", "= self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) productPointer = self.memory[self.position + 3]", "0 self.position = 0 # Recursive function to split a decimal into digits", "the digits array for i in range(5 - len(digits)): digits.append(0) # Consolidate the", "termination instruction while True: # Get the code at the current read position", "digits[2], digits[3], digits[4]) def resolveValue(self, memory, param, paramMode): # If in immediate mode,", "return the value directly if paramMode == 1: return param # Else, treat", "in [7, 8]: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position +", "is already halted\") # Loop infinitely until we reach the termination instruction while", "an error else: raise RuntimeError( f\"Unknown opcode {opcode} ({instruction}) at position {self.position}\" )", "10: return [n] else: r = self.getDigits(n // 10) return r + [n", "= list(reversed(self.getDigits(n))) # Zero fill the digits array for i in range(5 -", "+ 1], paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB )", "+ 1], paramModeA ) self.outputValues.append(value) # Advance the code position self.position += 2", "+ [n % 10] def splitInstruction(self, n): # Split the instruction into digits", "phase signals for permutation in itertools.permutations(range(5, 10)): amplifiers = [] # Use the", "getDigits(self, n): if n < 10: return [n] else: r = self.getDigits(n //", "return [n] else: r = self.getDigits(n // 10) return r + [n %", "elif opcode == 3: # If input is not available, stop execution if", "self.inputValue is None: self.state = IntCodeMachineState.WAITING_FOR_INPUT break # Store the value at the", "phase value to create an intcode machine for phase in permutation: machine =", "= 
None # Empty list to capture output values self.outputValues = [] #", "that's the end if i == 4 and machine.state is IntCodeMachineState.HALTED: break else:", "0 ): self.position = paramB # Else, do nothing and advance the position", "error else: raise RuntimeError( f\"Unknown opcode {opcode} ({instruction}) at position {self.position}\" ) @click.command()", "# Store the value at the indicated pointer position outPointer = self.memory[self.position +", "# stdlib imports import enum import itertools # vendor imports import click class", "\"\"\"Put your puzzle execution code here\"\"\" # Load the amplifier software instructions amplifierSoftware", "# Split the instruction into digits and reverse them digits = list(reversed(self.getDigits(n))) #", "digits[0] + 10 * digits[1] # Return the opcode and param modes return", "the position naturally else: self.position += 3 # Code 7 and 8 are", "digits = list(reversed(self.getDigits(n))) # Zero fill the digits array for i in range(5", "self.memory[self.position + 3] # print(\"ADD\", paramA, paramB, \"->\", sumPointer) # Perform the addition", "return memory[param] def execute(self): # Make surethe machine isn't already halted if self.state", "self.inputValue # Zero out the input value self.inputValue = None # Advance the", "Else, do nothing and advance the position naturally else: self.position += 3 #", "an intcode machine for phase in permutation: machine = IntCodeMachine(amplifierSoftware) # First execution", "by 4 self.position += 4 # Unknown opcode means there was an error", "pointer position outPointer = self.memory[self.position + 1] self.memory[outPointer] = self.inputValue # Zero out", "value and print it value = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA )", "a list of ints self.memory = list( map(lambda op: int(op), instructions.strip().split(\",\")) ) #", "through each machine in order until execution halts on the last previousValue =", "instruction into digits and reverse them 
digits = list(reversed(self.getDigits(n))) # Zero fill the", "paramA == paramB # Write the value to memory self.memory[outputPointer] = int(flag) #", "= self.memory[self.position + 1] self.memory[outPointer] = self.inputValue # Zero out the input value", "HALTED = enum.auto() WAITING_FOR_INPUT = enum.auto() class IntCodeMachine: def __init__(self, instructions): # Machine", "paramMode == 1: return param # Else, treat it as a pointer else:", "in itertools.permutations(range(5, 10)): amplifiers = [] # Use the phase value to create", "paramB, \"->\", sumPointer) # Perform the addition self.memory[sumPointer] = paramA + paramB #", "position naturally else: self.position += 3 # Code 7 and 8 are comparison", "= phase machine.execute() amplifiers.append(machine) # Loop through each machine in order until execution", "treat it as a pointer else: return memory[param] def execute(self): # Make surethe", "the comma-delimited string of numbers into a list of ints self.memory = list(", "instructions.strip().split(\",\")) ) # Input value is None self.inputValue = None # Empty list", "opcode == 1: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position +", "Iterate through all permutations of phase signals for permutation in itertools.permutations(range(5, 10)): amplifiers", "starts in a clean state self.state = IntCodeMachineState.CLEAN # Convert the comma-delimited string", "= paramA < paramB elif opcode == 8: flag = paramA == paramB", "for i, machine in enumerate(amplifiers): machine.inputValue = previousValue machine.execute() previousValue = machine.outputValues.pop(0) #", "elif opcode == 2: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position", "# Write the value to memory self.memory[outputPointer] = int(flag) # Advance the code", "8: flag = paramA == paramB # Write the value to memory self.memory[outputPointer]", "== 99: self.state = IntCodeMachineState.HALTED break # Code 1 is addition elif opcode", "clean 
state self.state = IntCodeMachineState.CLEAN # Convert the comma-delimited string of numbers into", "IntCodeMachineState(enum.Enum): CLEAN = enum.auto() HALTED = enum.auto() WAITING_FOR_INPUT = enum.auto() class IntCodeMachine: def", "halted\") # Loop infinitely until we reach the termination instruction while True: #", "memory, param, paramMode): # If in immediate mode, return the value directly if", "highest output signal print(\"RESULT:\", max(outputSignals)) # Execute cli function on main if __name__", "(opcode == 5 and paramA != 0) or ( opcode == 6 and", "Advance the code position self.position += 2 # Code 5 and 6 are", "the amplifier software instructions amplifierSoftware = input_file.read().strip() # List to catch all output", "digits[3], digits[4]) def resolveValue(self, memory, param, paramMode): # If in immediate mode, return", "not available, stop execution if self.inputValue is None: self.state = IntCodeMachineState.WAITING_FOR_INPUT break #", "self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) sumPointer = self.memory[self.position + 3] #", "memory[param] def execute(self): # Make surethe machine isn't already halted if self.state is", "code at the current read position instruction = self.memory[self.position] # Split the opcode", "array for i in range(5 - len(digits)): digits.append(0) # Consolidate the ones and", "4 # Code 3 is input elif opcode == 3: # If input", "in permutation: machine = IntCodeMachine(amplifierSoftware) # First execution is for the phase setting", "paramA * paramB # Advance the code position by 4 self.position += 4", "# Make surethe machine isn't already halted if self.state is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine", "# List to catch all output signals outputSignals = [] # Iterate through", "== 4: # Determine the value and print it value = self.resolveValue( self.memory,", "it as a pointer else: return memory[param] def execute(self): # Make surethe machine", ") sumPointer = 
self.memory[self.position + 3] # print(\"ADD\", paramA, paramB, \"->\", sumPointer) #", "machine for phase in permutation: machine = IntCodeMachine(amplifierSoftware) # First execution is for", "instructions): # Machine starts in a clean state self.state = IntCodeMachineState.CLEAN # Convert", "Code 99 means immediate termination if opcode == 99: self.state = IntCodeMachineState.HALTED break", "software instructions amplifierSoftware = input_file.read().strip() # List to catch all output signals outputSignals", "else: self.position += 3 # Code 7 and 8 are comparison elif opcode", "opcode == 7: flag = paramA < paramB elif opcode == 8: flag", "permutations of phase signals for permutation in itertools.permutations(range(5, 10)): amplifiers = [] #", "machine = IntCodeMachine(amplifierSoftware) # First execution is for the phase setting machine.inputValue =", "capture output values self.outputValues = [] # We will start reading the opcodes", "def splitInstruction(self, n): # Split the instruction into digits and reverse them digits", "# Iterate through all permutations of phase signals for permutation in itertools.permutations(range(5, 10)):", "__init__(self, instructions): # Machine starts in a clean state self.state = IntCodeMachineState.CLEAN #", "# Zero fill the digits array for i in range(5 - len(digits)): digits.append(0)", "to split a decimal into digits def getDigits(self, n): if n < 10:", "# Return the opcode and param modes return (opcode, digits[2], digits[3], digits[4]) def", "stop execution if self.inputValue is None: self.state = IntCodeMachineState.WAITING_FOR_INPUT break # Store the", "isn't already halted if self.state is IntCodeMachineState.HALTED: raise RuntimeError(\"Machine is already halted\") #", "# Advance the code position by 4 self.position += 4 # Code 2", "i in range(5 - len(digits)): digits.append(0) # Consolidate the ones and tens place", "# vendor imports import click class IntCodeMachineState(enum.Enum): CLEAN = enum.auto() HALTED = 
enum.auto()", "== 3: # If input is not available, stop execution if self.inputValue is", "is not available, stop execution if self.inputValue is None: self.state = IntCodeMachineState.WAITING_FOR_INPUT break", "is output elif opcode == 4: # Determine the value and print it", "elif opcode in [7, 8]: # Get memory values paramA = self.resolveValue( self.memory,", "of ints self.memory = list( map(lambda op: int(op), instructions.strip().split(\",\")) ) # Input value", "code here\"\"\" # Load the amplifier software instructions amplifierSoftware = input_file.read().strip() # List", "sumPointer) # Perform the addition self.memory[sumPointer] = paramA + paramB # Advance the", "permutation in itertools.permutations(range(5, 10)): amplifiers = [] # Use the phase value to", "param, paramMode): # If in immediate mode, return the value directly if paramMode", "paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) productPointer = self.memory[self.position +", "reverse them digits = list(reversed(self.getDigits(n))) # Zero fill the digits array for i", "Machine starts in a clean state self.state = IntCodeMachineState.CLEAN # Convert the comma-delimited", "and 6 are conditional jumps elif opcode in [5, 6]: # Get memory", "Advance the code position by 4 self.position += 4 # Unknown opcode means", "previousValue = 0 while True: for i, machine in enumerate(amplifiers): machine.inputValue = previousValue", "WAITING_FOR_INPUT = enum.auto() class IntCodeMachine: def __init__(self, instructions): # Machine starts in a", "in enumerate(amplifiers): machine.inputValue = previousValue machine.execute() previousValue = machine.outputValues.pop(0) # When the last", "self.position += 4 # Unknown opcode means there was an error else: raise", "self.memory[self.position + 2], paramModeB ) # If non-zero, set the position pointer if", "10)): amplifiers = [] # Use the phase value to create an intcode", "# We will start reading the opcodes at position 0 self.position 
= 0", "self.memory[self.position + 2], paramModeB ) sumPointer = self.memory[self.position + 3] # print(\"ADD\", paramA,", "them digits = list(reversed(self.getDigits(n))) # Zero fill the digits array for i in", "When the last amp halts, that's the end if i == 4 and", "6]: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1], paramModeA", "list( map(lambda op: int(op), instructions.strip().split(\",\")) ) # Input value is None self.inputValue =", "list to capture output values self.outputValues = [] # We will start reading", "import click class IntCodeMachineState(enum.Enum): CLEAN = enum.auto() HALTED = enum.auto() WAITING_FOR_INPUT = enum.auto()", "value based on the opcode if opcode == 7: flag = paramA <", "Input value is None self.inputValue = None # Empty list to capture output", "== 7: flag = paramA < paramB elif opcode == 8: flag =", "== 2: # Get memory values paramA = self.resolveValue( self.memory, self.memory[self.position + 1],", "self.memory[self.position + 1], paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB", "of phase signals for permutation in itertools.permutations(range(5, 10)): amplifiers = [] # Use", "self.inputValue = None # Empty list to capture output values self.outputValues = []", "paramModeA ) paramB = self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) sumPointer =", "== 8: flag = paramA == paramB # Write the value to memory", "resolveValue(self, memory, param, paramMode): # If in immediate mode, return the value directly", "= self.resolveValue( self.memory, self.memory[self.position + 2], paramModeB ) sumPointer = self.memory[self.position + 3]", "type=click.File(\"r\")) def main(input_file): \"\"\"Put your puzzle execution code here\"\"\" # Load the amplifier", "= list( map(lambda op: int(op), instructions.strip().split(\",\")) ) # Input value is None self.inputValue", "+ 3] # Perform the addition self.memory[productPointer] = 
paramA * paramB # Advance", "self.memory[sumPointer] = paramA + paramB # Advance the code position by 4 self.position" ]
[ "pytest import tifffile from tifffolder import LLSFolder ADD_REL = 423345 @pytest.fixture def lls_folder(tmp_path):", "from tifffolder import LLSFolder ADD_REL = 423345 @pytest.fixture def lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\"", "tf = LLSFolder(lls_folder) assert tf.asarray().shape == (10, 2, 16, 16, 16) assert tf.asarray(t=0).shape", "16, 2)).shape == (8, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape ==", "def test_b(lls_folder): tf = LLSFolder(lls_folder) assert tf.asarray().shape == (10, 2, 16, 16, 16)", "[os.path.basename(i) for i in names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder): tf", "= 100 im = np.random.rand(16, 16, 16) for w, t in product(wave, time):", "import pytest import tifffile from tifffolder import LLSFolder ADD_REL = 423345 @pytest.fixture def", "im = np.random.rand(16, 16, 16) for w, t in product(wave, time): fname =", "LLSFolder ADD_REL = 423345 @pytest.fixture def lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488,", "i = 100 im = np.random.rand(16, 16, 16) for w, t in product(wave,", "= LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for i in names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ]", "from itertools import product import numpy as np import pytest import tifffile from", "test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for i in names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\",", "\"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488, 560] time = range(10) i = 100 im =", "16, 16, 16) assert tf.asarray(t=0, c=0).shape == (16, 16, 16) assert 
tf.asarray(t=0, c=0,", "== (8, 16, 16) assert tf[0].shape == (2, 16, 16, 16) assert tf[0,", "wave = [488, 560] time = range(10) i = 100 im = np.random.rand(16,", "assert tf.asarray(t=0, c=0).shape == (16, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape", "tf.asarray(t=0, c=0).shape == (16, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape ==", "ab=t * i, rel=t * i + ADD_REL ) tifffile.imsave(tmp_path / fname, im)", "np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0, 0]) np.testing.assert_allclose(tf.asarray(t=0, c=0, z=range(0, 16, 2)), tf[0, 0,", "ADD_REL ) tifffile.imsave(tmp_path / fname, im) return tmp_path def test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0)", "[ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder): tf = LLSFolder(lls_folder) assert tf.asarray().shape == (10,", "assert tf.asarray().shape == (10, 2, 16, 16, 16) assert tf.asarray(t=0).shape == (2, 16,", "= \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488, 560] time = range(10) i = 100 im", "16) assert tf.asarray(t=0).shape == (2, 16, 16, 16) assert tf.asarray(t=0, c=0).shape == (16,", "os from itertools import product import numpy as np import pytest import tifffile", "assert tf[0, 0, :16:2].shape == (8, 16, 16) np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0,", "tf[0, 0].shape == (16, 16, 16) assert tf[0, 0, :16:2].shape == (8, 16,", "= range(10) i = 100 im = np.random.rand(16, 16, 16) for w, t", "assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16) assert tf[0].shape ==", "np.random.rand(16, 16, 16) for w, t in product(wave, time): fname = template.format( c=wave.index(w),", "import tifffile from tifffolder import LLSFolder ADD_REL = 423345 @pytest.fixture def lls_folder(tmp_path): 
template", "560] time = range(10) i = 100 im = np.random.rand(16, 16, 16) for", "2)).shape == (8, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8,", "16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16) assert tf[0].shape", "lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488, 560] time = range(10) i =", "LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for i in names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def", "template.format( c=wave.index(w), w=w, t=t, ab=t * i, rel=t * i + ADD_REL )", "] def test_b(lls_folder): tf = LLSFolder(lls_folder) assert tf.asarray().shape == (10, 2, 16, 16,", "numpy as np import pytest import tifffile from tifffolder import LLSFolder ADD_REL =", "2, 16, 16, 16) assert tf.asarray(t=0).shape == (2, 16, 16, 16) assert tf.asarray(t=0,", "tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16) assert tf.asarray(t=0, c=0, z=range(0,", "time): fname = template.format( c=wave.index(w), w=w, t=t, ab=t * i, rel=t * i", "16, 16) assert tf[0, 0, :16:2].shape == (8, 16, 16) np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0,", "== (16, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16,", "import LLSFolder ADD_REL = 423345 @pytest.fixture def lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave =", "import os from itertools import product import numpy as np import pytest import", "LLSFolder(lls_folder) assert tf.asarray().shape == (10, 2, 16, 16, 16) assert tf.asarray(t=0).shape == (2,", "16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16) assert tf.asarray(t=0,", "(8, 16, 16) np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0, 0]) 
np.testing.assert_allclose(tf.asarray(t=0, c=0, z=range(0, 16,", "c=wave.index(w), w=w, t=t, ab=t * i, rel=t * i + ADD_REL ) tifffile.imsave(tmp_path", "for w, t in product(wave, time): fname = template.format( c=wave.index(w), w=w, t=t, ab=t", "/ fname, im) return tmp_path def test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for", "(2, 16, 16, 16) assert tf[0, 0].shape == (16, 16, 16) assert tf[0,", "100 im = np.random.rand(16, 16, 16) for w, t in product(wave, time): fname", "def lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488, 560] time = range(10) i", "16) assert tf.asarray(t=0, c=0).shape == (16, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16,", "== (2, 16, 16, 16) assert tf[0, 0].shape == (16, 16, 16) assert", "return tmp_path def test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for i in names]", "time = range(10) i = 100 im = np.random.rand(16, 16, 16) for w,", "= [488, 560] time = range(10) i = 100 im = np.random.rand(16, 16,", "423345 @pytest.fixture def lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488, 560] time =", "for i in names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder): tf =", "i, rel=t * i + ADD_REL ) tifffile.imsave(tmp_path / fname, im) return tmp_path", "in product(wave, time): fname = template.format( c=wave.index(w), w=w, t=t, ab=t * i, rel=t", "f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder): tf = LLSFolder(lls_folder) assert tf.asarray().shape == (10, 2,", "(16, 16, 16) assert tf[0, 0, :16:2].shape == (8, 16, 16) np.testing.assert_allclose(tf.asarray(t=0), tf[0])", "+ ADD_REL ) 
tifffile.imsave(tmp_path / fname, im) return tmp_path def test_a(lls_folder): names =", "(2, 16, 16, 16) assert tf.asarray(t=0, c=0).shape == (16, 16, 16) assert tf.asarray(t=0,", "16) for w, t in product(wave, time): fname = template.format( c=wave.index(w), w=w, t=t,", "c=0).shape == (16, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8,", ":16:2].shape == (8, 16, 16) np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0, 0]) np.testing.assert_allclose(tf.asarray(t=0, c=0,", "* i, rel=t * i + ADD_REL ) tifffile.imsave(tmp_path / fname, im) return", "assert tf[0, 0].shape == (16, 16, 16) assert tf[0, 0, :16:2].shape == (8,", "(10, 2, 16, 16, 16) assert tf.asarray(t=0).shape == (2, 16, 16, 16) assert", "== [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder): tf = LLSFolder(lls_folder) assert tf.asarray().shape ==", "tf[0, 0, :16:2].shape == (8, 16, 16) np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0, 0])", "= LLSFolder(lls_folder) assert tf.asarray().shape == (10, 2, 16, 16, 16) assert tf.asarray(t=0).shape ==", "(16, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16)", "== (2, 16, 16, 16) assert tf.asarray(t=0, c=0).shape == (16, 16, 16) assert", "im) return tmp_path def test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for i in", "assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16) assert tf.asarray(t=0, c=0,", "np import pytest import tifffile from tifffolder import LLSFolder ADD_REL = 423345 @pytest.fixture", "tf.asarray(t=0).shape == (2, 16, 16, 16) assert tf.asarray(t=0, c=0).shape == (16, 16, 16)", "== (8, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16,", "tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), 
tf[0, 0]) np.testing.assert_allclose(tf.asarray(t=0, c=0, z=range(0, 16, 2)), tf[0, 0, :16:2])", "16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16) assert", "<gh_stars>1-10 import os from itertools import product import numpy as np import pytest", "16, 16, 16) assert tf[0, 0].shape == (16, 16, 16) assert tf[0, 0,", "test_b(lls_folder): tf = LLSFolder(lls_folder) assert tf.asarray().shape == (10, 2, 16, 16, 16) assert", "in names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder): tf = LLSFolder(lls_folder) assert", "0, :16:2].shape == (8, 16, 16) np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0, 0]) np.testing.assert_allclose(tf.asarray(t=0,", "tifffile from tifffolder import LLSFolder ADD_REL = 423345 @pytest.fixture def lls_folder(tmp_path): template =", "import product import numpy as np import pytest import tifffile from tifffolder import", "16, 16, 16) assert tf.asarray(t=0).shape == (2, 16, 16, 16) assert tf.asarray(t=0, c=0).shape", "16, 2)).shape == (8, 16, 16) assert tf[0].shape == (2, 16, 16, 16)", "f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder): tf = LLSFolder(lls_folder) assert tf.asarray().shape == (10, 2, 16,", "== (16, 16, 16) assert tf[0, 0, :16:2].shape == (8, 16, 16) np.testing.assert_allclose(tf.asarray(t=0),", "assert tf[0].shape == (2, 16, 16, 16) assert tf[0, 0].shape == (16, 16,", "as np import pytest import tifffile from tifffolder import LLSFolder ADD_REL = 423345", "tifffolder import LLSFolder ADD_REL = 423345 @pytest.fixture def lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave", "itertools import product import numpy as np import pytest import tifffile from tifffolder", "i + ADD_REL ) tifffile.imsave(tmp_path / fname, im) return tmp_path def 
test_a(lls_folder): names", "range(10) i = 100 im = np.random.rand(16, 16, 16) for w, t in", "template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488, 560] time = range(10) i = 100", "t=t, ab=t * i, rel=t * i + ADD_REL ) tifffile.imsave(tmp_path / fname,", "names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder): tf = LLSFolder(lls_folder) assert tf.asarray().shape", "= template.format( c=wave.index(w), w=w, t=t, ab=t * i, rel=t * i + ADD_REL", "fname, im) return tmp_path def test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for i", "assert tf.asarray(t=0).shape == (2, 16, 16, 16) assert tf.asarray(t=0, c=0).shape == (16, 16,", "assert [os.path.basename(i) for i in names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder):", "16, 16) assert tf[0, 0].shape == (16, 16, 16) assert tf[0, 0, :16:2].shape", "16) assert tf[0, 0].shape == (16, 16, 16) assert tf[0, 0, :16:2].shape ==", "w, t in product(wave, time): fname = template.format( c=wave.index(w), w=w, t=t, ab=t *", "z=range(0, 16, 2)).shape == (8, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape", "tmp_path def test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for i in names] ==", "tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16) assert tf[0].shape == (2,", "fname = template.format( c=wave.index(w), w=w, t=t, ab=t * i, rel=t * i +", "= 423345 @pytest.fixture def lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488, 560] time", "tifffile.imsave(tmp_path / fname, im) return tmp_path def test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0) assert 
[os.path.basename(i)", "import numpy as np import pytest import tifffile from tifffolder import LLSFolder ADD_REL", "16) np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0, 0]) np.testing.assert_allclose(tf.asarray(t=0, c=0, z=range(0, 16, 2)), tf[0,", "w=w, t=t, ab=t * i, rel=t * i + ADD_REL ) tifffile.imsave(tmp_path /", "(8, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16)", "= np.random.rand(16, 16, 16) for w, t in product(wave, time): fname = template.format(", "(8, 16, 16) assert tf[0].shape == (2, 16, 16, 16) assert tf[0, 0].shape", "16, 16) np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0, 0]) np.testing.assert_allclose(tf.asarray(t=0, c=0, z=range(0, 16, 2)),", "* i + ADD_REL ) tifffile.imsave(tmp_path / fname, im) return tmp_path def test_a(lls_folder):", "16) assert tf[0].shape == (2, 16, 16, 16) assert tf[0, 0].shape == (16,", "names = LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for i in names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\",", "product import numpy as np import pytest import tifffile from tifffolder import LLSFolder", "0].shape == (16, 16, 16) assert tf[0, 0, :16:2].shape == (8, 16, 16)", "tf[0].shape == (2, 16, 16, 16) assert tf[0, 0].shape == (16, 16, 16)", "t in product(wave, time): fname = template.format( c=wave.index(w), w=w, t=t, ab=t * i,", "16, 16) assert tf.asarray(t=0).shape == (2, 16, 16, 16) assert tf.asarray(t=0, c=0).shape ==", "rel=t * i + ADD_REL ) tifffile.imsave(tmp_path / fname, im) return tmp_path def", "16, 16) assert tf.asarray(t=0, c=0).shape == (16, 16, 16) assert tf.asarray(t=0, c=0, z=range(0,", "c=0, z=range(0, 16, 2)).shape == (8, 16, 16) assert tf[0].shape == (2, 16,", "16) assert tf[0, 0, :16:2].shape == (8, 16, 16) 
np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0),", "c=0, z=range(0, 16, 2)).shape == (8, 16, 16) assert tf.asarray(t=0, c=0, z=range(0, 16,", "ADD_REL = 423345 @pytest.fixture def lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488, 560]", "16, 16) for w, t in product(wave, time): fname = template.format( c=wave.index(w), w=w,", "def test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0) assert [os.path.basename(i) for i in names] == [", "16, 16) assert tf[0].shape == (2, 16, 16, 16) assert tf[0, 0].shape ==", "z=range(0, 16, 2)).shape == (8, 16, 16) assert tf[0].shape == (2, 16, 16,", ") tifffile.imsave(tmp_path / fname, im) return tmp_path def test_a(lls_folder): names = LLSFolder(lls_folder).select_filenames(t=0) assert", "== (8, 16, 16) np.testing.assert_allclose(tf.asarray(t=0), tf[0]) np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0, 0]) np.testing.assert_allclose(tf.asarray(t=0, c=0, z=range(0,", "2)).shape == (8, 16, 16) assert tf[0].shape == (2, 16, 16, 16) assert", "@pytest.fixture def lls_folder(tmp_path): template = \"cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif\" wave = [488, 560] time = range(10)", "== (10, 2, 16, 16, 16) assert tf.asarray(t=0).shape == (2, 16, 16, 16)", "product(wave, time): fname = template.format( c=wave.index(w), w=w, t=t, ab=t * i, rel=t *", "i in names] == [ f\"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif\", f\"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif\", ] def test_b(lls_folder): tf = LLSFolder(lls_folder)", "[488, 560] time = range(10) i = 100 im = np.random.rand(16, 16, 16)", "tf.asarray().shape == (10, 2, 16, 16, 16) assert tf.asarray(t=0).shape == (2, 16, 16," ]
[ "AntEnv as AntResetObsEnv from .half_cheetah import HalfCheetahEnv as HalfCheetahResetObsEnv from .hopper import HopperEnv", "AntResetObsEnv from .half_cheetah import HalfCheetahEnv as HalfCheetahResetObsEnv from .hopper import HopperEnv as HopperResetObsEnv", "import InvertedPendulumEnv as InvertedPendulumResetObsEnv from .pendulum import PendulumEnv as PendulumResetObsEnv from .swimmer import", "HalfCheetahEnv as HalfCheetahResetObsEnv from .hopper import HopperEnv as HopperResetObsEnv from .inverted_pendulum import InvertedPendulumEnv", "from .half_cheetah import HalfCheetahEnv as HalfCheetahResetObsEnv from .hopper import HopperEnv as HopperResetObsEnv from", "InvertedPendulumResetObsEnv from .pendulum import PendulumEnv as PendulumResetObsEnv from .swimmer import SwimmerEnv as SwimmerResetObsEnv", "as PendulumResetObsEnv from .swimmer import SwimmerEnv as SwimmerResetObsEnv from .walker2d import Walker2dEnv as", "import PendulumEnv as PendulumResetObsEnv from .swimmer import SwimmerEnv as SwimmerResetObsEnv from .walker2d import", ".half_cheetah import HalfCheetahEnv as HalfCheetahResetObsEnv from .hopper import HopperEnv as HopperResetObsEnv from .inverted_pendulum", "as HalfCheetahResetObsEnv from .hopper import HopperEnv as HopperResetObsEnv from .inverted_pendulum import InvertedPendulumEnv as", "as HopperResetObsEnv from .inverted_pendulum import InvertedPendulumEnv as InvertedPendulumResetObsEnv from .pendulum import PendulumEnv as", "InvertedPendulumEnv as InvertedPendulumResetObsEnv from .pendulum import PendulumEnv as PendulumResetObsEnv from .swimmer import SwimmerEnv", "import HopperEnv as HopperResetObsEnv from .inverted_pendulum import InvertedPendulumEnv as InvertedPendulumResetObsEnv from .pendulum import", "from .hopper import HopperEnv as HopperResetObsEnv from .inverted_pendulum import InvertedPendulumEnv as InvertedPendulumResetObsEnv from", "HopperEnv as HopperResetObsEnv from .inverted_pendulum import InvertedPendulumEnv as 
InvertedPendulumResetObsEnv from .pendulum import PendulumEnv", "HopperResetObsEnv from .inverted_pendulum import InvertedPendulumEnv as InvertedPendulumResetObsEnv from .pendulum import PendulumEnv as PendulumResetObsEnv", ".inverted_pendulum import InvertedPendulumEnv as InvertedPendulumResetObsEnv from .pendulum import PendulumEnv as PendulumResetObsEnv from .swimmer", ".ant import AntEnv as AntResetObsEnv from .half_cheetah import HalfCheetahEnv as HalfCheetahResetObsEnv from .hopper", "from .ant import AntEnv as AntResetObsEnv from .half_cheetah import HalfCheetahEnv as HalfCheetahResetObsEnv from", ".pendulum import PendulumEnv as PendulumResetObsEnv from .swimmer import SwimmerEnv as SwimmerResetObsEnv from .walker2d", "from .inverted_pendulum import InvertedPendulumEnv as InvertedPendulumResetObsEnv from .pendulum import PendulumEnv as PendulumResetObsEnv from", "as InvertedPendulumResetObsEnv from .pendulum import PendulumEnv as PendulumResetObsEnv from .swimmer import SwimmerEnv as", "import HalfCheetahEnv as HalfCheetahResetObsEnv from .hopper import HopperEnv as HopperResetObsEnv from .inverted_pendulum import", "PendulumEnv as PendulumResetObsEnv from .swimmer import SwimmerEnv as SwimmerResetObsEnv from .walker2d import Walker2dEnv", "import AntEnv as AntResetObsEnv from .half_cheetah import HalfCheetahEnv as HalfCheetahResetObsEnv from .hopper import", "HalfCheetahResetObsEnv from .hopper import HopperEnv as HopperResetObsEnv from .inverted_pendulum import InvertedPendulumEnv as InvertedPendulumResetObsEnv", ".hopper import HopperEnv as HopperResetObsEnv from .inverted_pendulum import InvertedPendulumEnv as InvertedPendulumResetObsEnv from .pendulum", "from .pendulum import PendulumEnv as PendulumResetObsEnv from .swimmer import SwimmerEnv as SwimmerResetObsEnv from", "PendulumResetObsEnv from .swimmer import SwimmerEnv as SwimmerResetObsEnv from .walker2d import Walker2dEnv as Walker2dResetObsEnv", "as AntResetObsEnv from .half_cheetah import 
HalfCheetahEnv as HalfCheetahResetObsEnv from .hopper import HopperEnv as" ]
[ "parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str, help='data path') parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use cuda') parser.add_argument('--lr',", "'m': print('use MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer ==", "n:not use ') parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD)", "batch_size_train = args.batch_size * num_GPU batch_size_valid = args.batch_size * num_GPU print(\"batch_size:\",batch_size_train) num_workers =", "= {}, step = {}, lr = {}'.format(epoch, step, lr)) elif step ==", "parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional layer parameters') parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor or", "default='./data/patch_prognostic', type=str, help='data path') parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use cuda') parser.add_argument('--lr', '-lr', default='1e-4',", "< 5: lr = lr * float(1 + step + epoch * len_epoch)", "40: lr /= 10 if epoch >= 80: lr /= 10 '''warmup''' if", "print('use Adadelta') optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4) net = torch.nn.DataParallel(net, device_ids=None)", "True optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2) else: net = MODELS[('resnet50')](factor=args.way,", "help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path to save') parser.add_argument('--log_path', '-lp', default='./log/', help='log", "Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) with torch.no_grad(): for idx, (img, T, O,", "= checkpoint['state_dict'] 
net.load_state_dict(pretrained_dict) return net def adjust_learning_rate(optimizer, epoch, step, len_epoch): \"\"\"decrease the learning", "'-drop_group', default='3,4', help='drop groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop prob') parser.add_argument('--freeze', '-f', action='store_true',", "convolutional layer parameters') parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor or tumor_beside or fibrous_tissue') parser.add_argument('--experimentway',", "O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) loss.register_hook(lambda g: print(g)) optimizer.zero_grad()", "T) print('O:', O) img = img.to(device) output = net(img) output, T, O, at_risk,", "net(img) output, T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) T", "cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) print(\"loss:\", loss.item()) Prediction = torch.cat((Prediction, output)) Survival", "name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path to save') parser.add_argument('--log_path', '-lp', default='./log/', help='log path", "'acc': 0} summary_writer = SummaryWriter(log_path) loss_valid_best = float('inf') for epoch in range(args.start, args.end):", "rate at 200 and 300 epoch\"\"\" lr = args.lr if epoch >= 20:", "shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader)) summary_train = {'epoch':", "or combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor path to load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path',", "Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) for idx, (img, T,", "O) T = T.to(device) O = O.to(device) loss = 
cox_cost(output, at_risk, O.reshape((N, 1)),", "path to save') parser.add_argument('--log_path', '-lp', default='./log/', help='log path to save') parser.add_argument('--ckpt', '-ckpt', default='./',", "' 'or l(LaycaSGD)') args = parser.parse_args() cudnn.benchmark = True log_path = os.path.join(args.log_path, args.experiment_name", "default='prognosis', type=str, help='prognosis or replase') parser.add_argument('--use_std', '-std', default='use', type=str, help='use std as feature,", "= torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) with torch.no_grad(): for idx, (img,", "ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, drop_last=True, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data,", "epoch) if summary_valid['loss'] < loss_valid_best: loss_vd_best = summary_valid['loss'] torch.save({'epoch': summary_train['epoch'], 'optimizer': optimizer.state_dict(), 'state_dict':", "= CI.item() return summary d_pth = args.data_path sp = ckpt_path_save + '/' +", "ExperimentWay=args.experimentway) valid_data = ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader =", "net): print(\"Use ckpt: \", args.ckpt) assert len(args.ckpt) != 0, \"Please input a valid", "torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) for idx, (img, T, O, _,", "import torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from utils.Survival_Aanlysis import SurvivalAnalysis from utils.RiskLayer", "os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data = ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) 
valid_data = ImageDataset(d_pth,", "help='checkpoint path to load') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--way', '-way', default='10',", "x in args.drop_group.split(',')] for block_group in drop_group: if block_group < 1 or block_group", "epoch ) print('train/loss', summary_train['loss'], epoch) print('train/CI', summary_train['CI'], epoch) print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'],", "args.drop_group.split(',')] for block_group in drop_group: if block_group < 1 or block_group > 4:", "= {}'.format(epoch, step, lr)) elif step == 0: print('epoch = {}, lr={}'.format(epoch, lr))", "'Adadelta': print('use Adadelta') optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4) net = torch.nn.DataParallel(net,", "failures, ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() return", "num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader)) summary_train = {'epoch': 0, 'fp': 0, 'tp': 0,", "summary_train['CI'], epoch) if epoch % 1 == 0: torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()}, (sp", "summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch) if epoch % 1", "Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties)", "weight_decay=1e-4) net = torch.nn.DataParallel(net, device_ids=None) if args.resume: net = load_checkpoint(args, net) def train(epoch,", "optimizer.step() Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed = torch.cat((Observed, 
O.float()))", "O) img = img.to(device) output = net(img) output, T, O, at_risk, failures, ties,", "cox_cost from Prognostic.data.image_producer import ImageDataset from Prognostic.model import MODELS from lifelines.utils import concordance_index", "if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device = torch.device(\"cuda\" if args.use_cuda else", "weight_decay=1e-4) if args.optimizer == 's': print('use SGD') optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4)", "type=str, help='choose optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or l(LaycaSGD)') args = parser.parse_args()", "parser.add_argument('--start', '-s', default='0', type=int, help='start epoch') parser.add_argument('--end', '-e', default='10000', type=int, help='end epoch') parser.add_argument('--experiment_id',", "d_pth = args.data_path sp = ckpt_path_save + '/' + str(args.way) if not os.path.exists(sp):", "\"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() return summary d_pth = args.data_path", "help='batch size') parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker') parser.add_argument('--start', '-s', default='0', type=int, help='start epoch')", "summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch) summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch ) print('train/loss', summary_train['loss'],", "type=int, help='start epoch') parser.add_argument('--end', '-e', default='10000', type=int, help='end epoch') parser.add_argument('--experiment_id', '-eid', default='0', help='experiment", "in optimizer.param_groups: param_group['lr'] = lr return lr drop_prob = [0.] 
* 4 if", "betas=(0.9, 0.99), weight_decay=1e-4) if args.optimizer == 's': print('use SGD') optimizer = torch.optim.SGD(net.parameters(), momentum=0.9,", "str(args.way) if not os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data = ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway)", "'-std', default='use', type=str, help='use std as feature, u:use, o:only, n:not use ') parser.add_argument('--optimizer',", "print('valid/CI', summary_valid['CI'], epoch) if summary_valid['loss'] < loss_valid_best: loss_vd_best = summary_valid['loss'] torch.save({'epoch': summary_train['epoch'], 'optimizer':", "comma separated list of integers' 'between 1 and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group - 1]", "'-e', default='10000', type=int, help='end epoch') parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup',", "help='train tensor path to load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor path to load')", "> 4: raise ValueError( 'drop_group should be a comma separated list of integers'", "0, 'Neg': 0, 'Pos': 0} summary_valid = {'loss': float('inf'), 'acc': 0} summary_writer =", "epoch) print('train/CI', summary_train['CI'], epoch) print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'], epoch) if summary_valid['loss'] <", "args.batch_size * num_GPU print(\"batch_size:\",batch_size_train) num_workers = args.num_worker * num_GPU SA = SurvivalAnalysis() def", "summary): loss_sum = 0 acc_sum = 0 net.train() pth = \"\" length =", "for param in net.fc.parameters(): param.requires_grad = True optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),", "output, T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) print('ties:', ties)", "O) print('ties:', ties) T = 
T.to(device) O = O.to(device) loss = cox_cost(output, at_risk,", "\"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() summary['lr'] = optimizer.param_groups[0]['lr'] return summary", "default='10', type=str, help='train way, 40 10 or combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor", "raise ValueError( 'drop_group should be a comma separated list of integers' 'between 1", "args.use_cuda else \"cpu\") num_GPU = len(args.device_ids.split(',')) batch_size_train = args.batch_size * num_GPU batch_size_valid =", ">= 80: lr /= 10 '''warmup''' if epoch < 5: lr = lr", "count) in enumerate(dataloader): if O.sum() == 0: continue N = O.shape[0] print('T:', T)", "a comma separated list of integers' 'between 1 and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group -", "O.reshape((N, 1)), failures, ties) loss.register_hook(lambda g: print(g)) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction", "for idx, (img, T, O, _, count) in enumerate(dataloader): if O.sum() == 0:", "0, \"Please input a valid ckpt_path\" checkpoint = torch.load(args.ckpt) pretrained_dict = checkpoint['state_dict'] net.load_state_dict(pretrained_dict)", "args.optimizer != 'Adadelta': lr = adjust_learning_rate(optimizer, epoch, idx, len(dataloader)) img = img.to(device) output", "help='log path to save') parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path to load') parser.add_argument('--resume', '-r',", "str(args.experiment_id)) if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device = torch.device(\"cuda\" if args.use_cuda", "def load_checkpoint(args, net): print(\"Use ckpt: \", args.ckpt) assert len(args.ckpt) != 0, \"Please input", "0, 'fp': 0, 'tp': 0, 'Neg': 0, 'Pos': 0} summary_valid = {'loss': float('inf'),", "step + epoch * len_epoch) / (5. 
* len_epoch) print('epoch = {}, step", "lr = lr * float(1 + step + epoch * len_epoch) / (5.", "args.num_worker * num_GPU SA = SurvivalAnalysis() def load_checkpoint(args, net): print(\"Use ckpt: \", args.ckpt)", "if args.optimizer == 'a': print('use adam') optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4)", "'-o', default='a', type=str, help='choose optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or l(LaycaSGD)') args", "path to load') parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma", "momentum') parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch size') parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker') parser.add_argument('--start',", "img = img.to(device) output = net(img) output, T, O, at_risk, failures, ties, _", "loss.item() summary['CI'] = CI.item() summary['lr'] = optimizer.param_groups[0]['lr'] return summary def valid(dataloader, summary): net.eval()", "param in net.fc.parameters(): param.requires_grad = True optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr,", "+ '/' + str(epoch) + '.ckpt')) summary_valid = valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss', summary_valid['loss'],", "* len_epoch) / (5. 
* len_epoch) print('epoch = {}, step = {}, lr", "factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) valid_data = ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data))", "parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor path to load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor", "SummaryWriter(log_path) loss_valid_best = float('inf') for epoch in range(args.start, args.end): summary_train = train(epoch, train_dataloader,", "(img, T, O, _, count) in enumerate(dataloader): if O.sum() == 0: continue N", "float(1 + step + epoch * len_epoch) / (5. * len_epoch) print('epoch =", "(5. * len_epoch) print('epoch = {}, step = {}, lr = {}'.format(epoch, step,", "optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4) if args.optimizer == 'l': print('use LaycaSGD') optimizer", "torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader)) summary_train = {'epoch': 0, 'fp': 0,", "+ str(epoch) + '.ckpt')) summary_valid = valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss', summary_valid['loss'], epoch) summary_writer.add_scalar(", "default='3,4', help='drop groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop prob') parser.add_argument('--freeze', '-f', action='store_true', help='Freeze", "else: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer == 'a': print('use adam') optimizer =", "[0.] 
* 4 if args.drop_group: drop_probs = args.drop_prob drop_group = [int(x) for x", "help='experiment id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path to", "default='./tensor_path', help='train tensor path to load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor path to", "80: lr /= 10 '''warmup''' if epoch < 5: lr = lr *", "print(\"loss:\", loss.item()) Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed = torch.cat((Observed,", "at_risk, O.reshape((N, 1)), failures, ties) print(\"loss:\", loss.item()) Prediction = torch.cat((Prediction, output)) Survival =", "to load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor path to load') parser.add_argument('--alpha', '-a', default='1.0',", "output, T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) T =", "loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() return summary d_pth =", "os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.exists(ckpt_path_save):", "if summary_valid['loss'] < loss_valid_best: loss_vd_best = summary_valid['loss'] torch.save({'epoch': summary_train['epoch'], 'optimizer': optimizer.state_dict(), 'state_dict': net.state_dict()},", "valid(dataloader, summary): net.eval() length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed", "elif step == 0: print('epoch = {}, lr={}'.format(epoch, lr)) for param_group in optimizer.param_groups:", "args.optimizer == 's': print('use SGD') optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4) if args.optimizer", "if 
args.optimizer == 'm': print('use MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True)", "drop_prob=drop_prob, require_grad=False).to(device) for param in net.fc.parameters(): param.requires_grad = True optimizer = torch.optim.SGD(filter(lambda p:", "4: raise ValueError( 'drop_group should be a comma separated list of integers' 'between", "os.path.join(args.log_path, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save = os.path.join(args.ckpt_path_save,", "nesterov=True) if args.optimizer == 'm': print('use MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4,", "'Neg': 0, 'Pos': 0} summary_valid = {'loss': float('inf'), 'acc': 0} summary_writer = SummaryWriter(log_path)", "float('inf') for epoch in range(args.start, args.end): summary_train = train(epoch, train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss',", "cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from utils.Survival_Aanlysis import SurvivalAnalysis from utils.RiskLayer import cox_cost from", "help='resume from checkpoint') parser.add_argument('--way', '-way', default='10', type=str, help='train way, 40 10 or combinate')", "T, O) print('ties:', ties) T = T.to(device) O = O.to(device) loss = cox_cost(output,", "= {'loss': float('inf'), 'acc': 0} summary_writer = SummaryWriter(log_path) loss_valid_best = float('inf') for epoch", "len_epoch) / (5. 
* len_epoch) print('epoch = {}, step = {}, lr =", "continue N = O.shape[0] print('T:', T) print('O:', O) if args.optimizer != 'Adadelta': lr", "learning rate at 200 and 300 epoch\"\"\" lr = args.lr if epoch >=", "lr = adjust_learning_rate(optimizer, epoch, idx, len(dataloader)) img = img.to(device) output = net(img) output,", "LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'm': print('use MinimalLaycaSGD') optimizer =", "integers' 'between 1 and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group - 1] = drop_probs / 4.0", "= 0 net.train() pth = \"\" length = len(dataloader) Prediction = torch.Tensor().to(device) Survival", "return summary def valid(dataloader, summary): net.eval() length = len(dataloader) Prediction = torch.Tensor().to(device) Survival", "* num_GPU print(\"batch_size:\",batch_size_train) num_workers = args.num_worker * num_GPU SA = SurvivalAnalysis() def load_checkpoint(args,", "SurvivalAnalysis() def load_checkpoint(args, net): print(\"Use ckpt: \", args.ckpt) assert len(args.ckpt) != 0, \"Please", "0: print('epoch = {}, lr={}'.format(epoch, lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr", "param.requires_grad = True optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2) else: net", "0} summary_valid = {'loss': float('inf'), 'acc': 0} summary_writer = SummaryWriter(log_path) loss_valid_best = float('inf')", "if not os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data = ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) valid_data", "block_group > 4: raise ValueError( 'drop_group should be a comma separated list of", "'''warmup''' if epoch < 5: lr = lr * float(1 + step +", "print(len(train_data)) print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, 
drop_last=True, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid,", "load_checkpoint(args, net): print(\"Use ckpt: \", args.ckpt) assert len(args.ckpt) != 0, \"Please input a", "o:only, n:not use ') parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta),", "save') parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path to load') parser.add_argument('--resume', '-r', action='store_true', help='resume from", "input a valid ckpt_path\" checkpoint = torch.load(args.ckpt) pretrained_dict = checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return net", "0: torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()}, (sp + '/' + str(epoch) + '.ckpt')) summary_valid", "= torch.cat((Survival, T.float())) Observed = torch.cat((Observed, O.float())) Prediction, Survival, Observed, at_risk, failures, ties,", "CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\",", "lr=args.lr, weight_decay=1e-2) else: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer == 'a': print('use adam')", "summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch) summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch ) print('train/loss', summary_train['loss'], epoch) print('train/CI',", "parser.add_argument('--use_std', '-std', default='use', type=str, help='use std as feature, u:use, o:only, n:not use ')", "if args.freeze: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device) for param in net.fc.parameters(): param.requires_grad =", "N = O.shape[0] print('T:', T) print('O:', O) if args.optimizer != 'Adadelta': lr =", 
"os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device = torch.device(\"cuda\" if args.use_cuda else \"cpu\") num_GPU = len(args.device_ids.split(','))", "'-ckpt_s', default='./model/', help='checkpoint path to save') parser.add_argument('--log_path', '-lp', default='./log/', help='log path to save')", "utils.Survival_Aanlysis import SurvivalAnalysis from utils.RiskLayer import cox_cost from Prognostic.data.image_producer import ImageDataset from Prognostic.model", "pth = \"\" length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed", "torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4) net = torch.nn.DataParallel(net, device_ids=None) if args.resume: net =", "parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/',", "= os.path.join(args.ckpt_path_save, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] =", "= CI.item() summary['lr'] = optimizer.param_groups[0]['lr'] return summary def valid(dataloader, summary): net.eval() length =", "failures, ties, _ = SA.calc_at_risk(output, T, O) T = T.to(device) O = O.to(device)", "rate') parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD momentum') parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch size')", "'-a', default='1.0', type=float, help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated indices of GPU", "'-lpth_v', default='./tensor_path', help='valid tensor path to load') parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup alpha')", "else \"cpu\") num_GPU = 
len(args.device_ids.split(',')) batch_size_train = args.batch_size * num_GPU batch_size_valid = args.batch_size", "return lr drop_prob = [0.] * 4 if args.drop_group: drop_probs = args.drop_prob drop_group", "use,' ' e.g. 0,1 for using GPU_0' ' and GPU_1, default 0.') parser.add_argument('--drop_group',", "be a comma separated list of integers' 'between 1 and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group", "adjust_learning_rate(optimizer, epoch, idx, len(dataloader)) img = img.to(device) output = net(img) output, T, O,", "Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item()", "= load_checkpoint(args, net) def train(epoch, dataloader, summary): loss_sum = 0 acc_sum = 0", "= O.shape[0] print('T:', T) print('O:', O) img = img.to(device) output = net(img) output,", "{}, step = {}, lr = {}'.format(epoch, step, lr)) elif step == 0:", "= MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device) for param in net.fc.parameters(): param.requires_grad = True optimizer =", "print(\"Use ckpt: \", args.ckpt) assert len(args.ckpt) != 0, \"Please input a valid ckpt_path\"", "time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str, help='data path') parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use cuda')", "'valid/loss', summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch) summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch ) print('train/loss',", "Observed = torch.Tensor().to(device) with torch.no_grad(): for idx, (img, T, O, _, count) in", "+ str(args.experiment_id)) if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device = torch.device(\"cuda\" if", "print(g)) optimizer.zero_grad() loss.backward() 
torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival,", "load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor path to load') parser.add_argument('--alpha', '-a', default='1.0', type=float,", "idx, len(dataloader)) img = img.to(device) output = net(img) output, T, O, at_risk, failures,", "summary_writer = SummaryWriter(log_path) loss_valid_best = float('inf') for epoch in range(args.start, args.end): summary_train =", "'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or l(LaycaSGD)') args = parser.parse_args() cudnn.benchmark = True log_path =", "factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, drop_last=True, shuffle=True)", "help='drop groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop prob') parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional", "< loss_valid_best: loss_vd_best = summary_valid['loss'] torch.save({'epoch': summary_train['epoch'], 'optimizer': optimizer.state_dict(), 'state_dict': net.state_dict()}, os.path.join(sp, 'best.ckpt'))", "== 0: print('epoch = {}, lr={}'.format(epoch, lr)) for param_group in optimizer.param_groups: param_group['lr'] =", "T, O, _, count) in enumerate(dataloader): N = O.shape[0] print('T:', T) print('O:', O)", "/ (5. 
* len_epoch) print('epoch = {}, step = {}, lr = {}'.format(epoch,", "and 300 epoch\"\"\" lr = args.lr if epoch >= 20: lr /= 10", "== 0: torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()}, (sp + '/' + str(epoch) + '.ckpt'))", "default='2', type=int, help='num_worker') parser.add_argument('--start', '-s', default='0', type=int, help='start epoch') parser.add_argument('--end', '-e', default='10000', type=int,", "img.to(device) output = net(img) output, T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output,", "from tensorboardX import SummaryWriter import torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from utils.Survival_Aanlysis", "as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from utils.Survival_Aanlysis import SurvivalAnalysis from utils.RiskLayer import cox_cost", "path to load') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--way', '-way', default='10', type=str,", "cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI']", "(sp + '/' + str(epoch) + '.ckpt')) summary_valid = valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss',", "parser.add_argument('--way', '-way', default='10', type=str, help='train way, 40 10 or combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path',", "if not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name + \"_\" + str(args.experiment_id)) if", "O.sum() == 0: continue N = O.shape[0] print('T:', T) print('O:', O) if args.optimizer", "in net.fc.parameters(): param.requires_grad = True optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2)", "= 
ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) valid_data = ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False, type_key=args.type_key,", "summary_train = train(epoch, train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch)", "idx, (img, T, O, _, count) in enumerate(dataloader): N = O.shape[0] print('T:', T)", "{'epoch': 0, 'fp': 0, 'tp': 0, 'Neg': 0, 'Pos': 0} summary_valid = {'loss':", "u:use, o:only, n:not use ') parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose optimizer:a(adam), s(sgd), '", "'-eway', default='prognosis', type=str, help='prognosis or replase') parser.add_argument('--use_std', '-std', default='use', type=str, help='use std as", "MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer == 'a': print('use adam') optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9,", "a valid ckpt_path\" checkpoint = torch.load(args.ckpt) pretrained_dict = checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return net def", "save') parser.add_argument('--log_path', '-lp', default='./log/', help='log path to save') parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path", "= args.lr if epoch >= 20: lr /= 10 if epoch >= 40:", "20: lr /= 10 if epoch >= 40: lr /= 10 if epoch", "= O.shape[0] print('T:', T) print('O:', O) if args.optimizer != 'Adadelta': lr = adjust_learning_rate(optimizer,", "from lifelines.utils import concordance_index from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD parser = argparse.ArgumentParser(description='Predicting survival", "drop_prob[block_group - 1] = drop_probs / 4.0 ** (4 - block_group) if args.freeze:", "step = {}, lr = {}'.format(epoch, step, lr)) elif step == 0: print('epoch", "drop_group: if block_group < 1 or block_group > 4: raise 
ValueError( 'drop_group should", "_ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss = cox_cost(Prediction,", "1] = drop_probs / 4.0 ** (4 - block_group) if args.freeze: net =", "parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker') parser.add_argument('--start', '-s', default='0', type=int, help='start epoch') parser.add_argument('--end', '-e',", "to load') parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated", "pretrained_dict = checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return net def adjust_learning_rate(optimizer, epoch, step, len_epoch): \"\"\"decrease the", "'-f', action='store_true', help='Freeze convolutional layer parameters') parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor or tumor_beside", "args.ckpt) assert len(args.ckpt) != 0, \"Please input a valid ckpt_path\" checkpoint = torch.load(args.ckpt)", "MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'Adadelta': print('use Adadelta') optimizer =", "from utils.Survival_Aanlysis import SurvivalAnalysis from utils.RiskLayer import cox_cost from Prognostic.data.image_producer import ImageDataset from", "summary_valid['CI'], epoch) if summary_valid['loss'] < loss_valid_best: loss_vd_best = summary_valid['loss'] torch.save({'epoch': summary_train['epoch'], 'optimizer': optimizer.state_dict(),", "default='True', type=bool, help='use cuda') parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning rate') parser.add_argument('--momentum', '-mom', default='0.9',", "torch.Tensor().to(device) Observed = torch.Tensor().to(device) with torch.no_grad(): for idx, (img, T, O, _, count)", 
"\"Please input a valid ckpt_path\" checkpoint = torch.load(args.ckpt) pretrained_dict = checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return", "way=\"valid\", factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, drop_last=True,", "parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor path to load') parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup", "default='a', type=str, help='choose optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or l(LaycaSGD)') args =", "if args.optimizer == 'Adadelta': print('use Adadelta') optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4)", "SA.calc_at_risk(output, T, O) T = T.to(device) O = O.to(device) loss = cox_cost(output, at_risk,", "torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()}, (sp + '/' + str(epoch) + '.ckpt')) summary_valid =", "= ckpt_path_save + '/' + str(args.way) if not os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data =", "SA = SurvivalAnalysis() def load_checkpoint(args, net): print(\"Use ckpt: \", args.ckpt) assert len(args.ckpt) !=", "' and GPU_1, default 0.') parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1',", "= True log_path = os.path.join(args.log_path, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.isdir(log_path):", "if args.drop_group: drop_probs = args.drop_prob drop_group = [int(x) for x in args.drop_group.split(',')] for", "epoch >= 80: lr /= 10 '''warmup''' if epoch < 5: lr =", "lr={}'.format(epoch, lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr return lr drop_prob =", 
"optimizer.param_groups: param_group['lr'] = lr return lr drop_prob = [0.] * 4 if args.drop_group:", "torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, drop_last=True, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\",", "type=float, help='learning rate') parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD momentum') parser.add_argument('--batch_size', '-b', default='5', type=int,", "default='0.9', type=float, help='SGD momentum') parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch size') parser.add_argument('--num_worker', '-nw', default='2',", "epoch in range(args.start, args.end): summary_train = train(epoch, train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch)", "path') parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use cuda') parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning rate')", "help='data path') parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use cuda') parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning", "optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2) else: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device)", "default='10000', type=int, help='end epoch') parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment", "range(args.start, args.end): summary_train = train(epoch, train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI',", "print(d_pth) train_data = 
ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) valid_data = ImageDataset(d_pth, way=\"valid\", factor=args.way,", "summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch) if epoch % 1 == 0: torch.save({'epoch': summary_train['epoch'], 'state_dict':", "in range(args.start, args.end): summary_train = train(epoch, train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar(", "type=str, help='data path') parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use cuda') parser.add_argument('--lr', '-lr', default='1e-4', type=float,", "'a': print('use adam') optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4) if args.optimizer ==", "tensor path to load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor path to load') parser.add_argument('--alpha',", "= torch.cat((Observed, O.float())) Prediction, Survival, Observed, at_risk, failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(),", "with torch.no_grad(): for idx, (img, T, O, _, count) in enumerate(dataloader): N =", "or tumor_beside or fibrous_tissue') parser.add_argument('--experimentway', '-eway', default='prognosis', type=str, help='prognosis or replase') parser.add_argument('--use_std', '-std',", "= {}, lr = {}'.format(epoch, step, lr)) elif step == 0: print('epoch =", "the learning rate at 200 and 300 epoch\"\"\" lr = args.lr if epoch", "+ '/' + str(args.way) if not os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data = ImageDataset(d_pth, factor=args.way,", "train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, drop_last=True, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False,", "= 
SummaryWriter(log_path) loss_valid_best = float('inf') for epoch in range(args.start, args.end): summary_train = train(epoch,", "lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr return lr drop_prob = [0.]", "args.optimizer == 'a': print('use adam') optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4) if", "0, 'tp': 0, 'Neg': 0, 'Pos': 0} summary_valid = {'loss': float('inf'), 'acc': 0}", "+ '/../../') from utils.Survival_Aanlysis import SurvivalAnalysis from utils.RiskLayer import cox_cost from Prognostic.data.image_producer import", "step, len_epoch): \"\"\"decrease the learning rate at 200 and 300 epoch\"\"\" lr =", "O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) print('ties:', ties) T =", "os.mkdir(sp) print(d_pth) train_data = ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) valid_data = ImageDataset(d_pth, way=\"valid\",", "40 10 or combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor path to load') parser.add_argument('--load_pth_valid',", "args.batch_size * num_GPU batch_size_valid = args.batch_size * num_GPU print(\"batch_size:\",batch_size_train) num_workers = args.num_worker *", "10 '''warmup''' if epoch < 5: lr = lr * float(1 + step", "net.parameters()), lr=args.lr, weight_decay=1e-2) else: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer == 'a': print('use", "parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor or tumor_beside or fibrous_tissue') parser.add_argument('--experimentway', '-eway', default='prognosis', type=str,", "T.float())) Observed = torch.cat((Observed, O.float())) Prediction, Survival, Observed, at_risk, failures, ties, _ =", "== 0: continue N = O.shape[0] print('T:', T) print('O:', O) if args.optimizer !=", "'valid/CI', summary_valid['CI'], epoch) 
summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch ) print('train/loss', summary_train['loss'], epoch) print('train/CI', summary_train['CI'],", "0, 'Pos': 0} summary_valid = {'loss': float('inf'), 'acc': 0} summary_writer = SummaryWriter(log_path) loss_valid_best", "'-r', action='store_true', help='resume from checkpoint') parser.add_argument('--way', '-way', default='10', type=str, help='train way, 40 10", "help='learning rate') parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD momentum') parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch", "epoch) summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch ) print('train/loss', summary_train['loss'], epoch) print('train/CI', summary_train['CI'], epoch) print('valid/loss',", "type=str, help='train way, 40 10 or combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor path", "import MinimalLaycaSGD, LaycaSGD parser = argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str, help='data", "parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--way', '-way', default='10', type=str, help='train way, 40", "T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) T = T.to(device)", "= torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4) if args.optimizer == 's': print('use SGD') optimizer", "to save') parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path to load') parser.add_argument('--resume', '-r', action='store_true', help='resume", "help='end epoch') parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name') 
parser.add_argument('--ckpt_path_save',", "0 acc_sum = 0 net.train() pth = \"\" length = len(dataloader) Prediction =", "summary_valid['loss'] < loss_valid_best: loss_vd_best = summary_valid['loss'] torch.save({'epoch': summary_train['epoch'], 'optimizer': optimizer.state_dict(), 'state_dict': net.state_dict()}, os.path.join(sp,", "= optimizer.param_groups[0]['lr'] return summary def valid(dataloader, summary): net.eval() length = len(dataloader) Prediction =", "summary_valid) summary_writer.add_scalar( 'valid/loss', summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch) summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch", "lr /= 10 if epoch >= 80: lr /= 10 '''warmup''' if epoch", "len_epoch) print('epoch = {}, step = {}, lr = {}'.format(epoch, step, lr)) elif", "help='train way, 40 10 or combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor path to", "= args.batch_size * num_GPU batch_size_valid = args.batch_size * num_GPU print(\"batch_size:\",batch_size_train) num_workers = args.num_worker", "args.drop_group: drop_probs = args.drop_prob drop_group = [int(x) for x in args.drop_group.split(',')] for block_group", "LaycaSGD parser = argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str, help='data path') parser.add_argument('--use_cuda',", "print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'], epoch) if summary_valid['loss'] < loss_valid_best: loss_vd_best = summary_valid['loss']", "acc_sum = 0 net.train() pth = \"\" length = len(dataloader) Prediction = torch.Tensor().to(device)", "from Prognostic.data.image_producer import ImageDataset from Prognostic.model import MODELS from lifelines.utils import concordance_index from", "summary_train['loss'], epoch) print('train/CI', summary_train['CI'], epoch) 
print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'], epoch) if summary_valid['loss']", "print('T:', T) print('O:', O) img = img.to(device) output = net(img) output, T, O,", "1)), failures, ties) print(\"loss:\", loss.item()) Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float()))", "assert len(args.ckpt) != 0, \"Please input a valid ckpt_path\" checkpoint = torch.load(args.ckpt) pretrained_dict", "args.end): summary_train = train(epoch, train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'],", "args.lr if epoch >= 20: lr /= 10 if epoch >= 40: lr", "summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch) if epoch % 1 ==", "'-drop_prob', default='0.1', type=float, help='drop prob') parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional layer parameters') parser.add_argument('--type-key',", "'-mom', default='0.9', type=float, help='SGD momentum') parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch size') parser.add_argument('--num_worker', '-nw',", "path to load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor path to load') parser.add_argument('--alpha', '-a',", "dataloader, summary): loss_sum = 0 acc_sum = 0 net.train() pth = \"\" length", "loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() summary['lr'] = optimizer.param_groups[0]['lr'] return", "def valid(dataloader, summary): net.eval() length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device)", "Prognostic.data.image_producer import ImageDataset from Prognostic.model import MODELS from lifelines.utils import concordance_index from utils.LaycaOptimizer", "'-lpth_t', 
default='./tensor_path', help='train tensor path to load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor path", "as feature, u:use, o:only, n:not use ') parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose optimizer:a(adam),", "1 or block_group > 4: raise ValueError( 'drop_group should be a comma separated", "default 0.') parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop prob')", "* float(1 + step + epoch * len_epoch) / (5. * len_epoch) print('epoch", "args.freeze: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device) for param in net.fc.parameters(): param.requires_grad = True", "loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed", "list of integers' 'between 1 and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group - 1] = drop_probs", "summary_train = {'epoch': 0, 'fp': 0, 'tp': 0, 'Neg': 0, 'Pos': 0} summary_valid", "at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) print('ties:', ties) T = T.to(device)", "summary['loss'] = loss.item() summary['CI'] = CI.item() return summary d_pth = args.data_path sp =", "= parser.parse_args() cudnn.benchmark = True log_path = os.path.join(args.log_path, args.experiment_name + \"_\" + str(args.experiment_id))", "not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name + \"_\" + str(args.experiment_id)) if not", "ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() summary['lr'] =", "drop_group = [int(x) for x in args.drop_group.split(',')] for block_group in drop_group: if block_group", "torch.cat((Prediction, output)) 
Survival = torch.cat((Survival, T.float())) Observed = torch.cat((Observed, O.float())) Prediction, Survival, Observed,", "print(\"batch_size:\",batch_size_train) num_workers = args.num_worker * num_GPU SA = SurvivalAnalysis() def load_checkpoint(args, net): print(\"Use", "default='./log/', help='log path to save') parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path to load') parser.add_argument('--resume',", "parameters') parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor or tumor_beside or fibrous_tissue') parser.add_argument('--experimentway', '-eway', default='prognosis',", "'-s', default='0', type=int, help='start epoch') parser.add_argument('--end', '-e', default='10000', type=int, help='end epoch') parser.add_argument('--experiment_id', '-eid',", "length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) for", "T, O, _, count) in enumerate(dataloader): if O.sum() == 0: continue N =", "count) in enumerate(dataloader): N = O.shape[0] print('T:', T) print('O:', O) img = img.to(device)", "loss_valid_best = float('inf') for epoch in range(args.start, args.end): summary_train = train(epoch, train_dataloader, summary_train)", "default='1e-4', type=float, help='learning rate') parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD momentum') parser.add_argument('--batch_size', '-b', default='5',", "if args.optimizer != 'Adadelta': lr = adjust_learning_rate(optimizer, epoch, idx, len(dataloader)) img = img.to(device)", "failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss", "not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device = torch.device(\"cuda\" if args.use_cuda else \"cpu\")", 
"'train/CI', summary_train['CI'], epoch) if epoch % 1 == 0: torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()},", "= train(epoch, train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch) if", "'-use_cuda', default='True', type=bool, help='use cuda') parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning rate') parser.add_argument('--momentum', '-mom',", "eps=1e-06, weight_decay=1e-4) net = torch.nn.DataParallel(net, device_ids=None) if args.resume: net = load_checkpoint(args, net) def", "separated indices of GPU to use,' ' e.g. 0,1 for using GPU_0' '", "at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) T = T.to(device) O =", "MinimalLaycaSGD, LaycaSGD parser = argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str, help='data path')", "alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated indices of GPU to use,' ' e.g.", "fibrous_tissue') parser.add_argument('--experimentway', '-eway', default='prognosis', type=str, help='prognosis or replase') parser.add_argument('--use_std', '-std', default='use', type=str, help='use", "O.float())) Prediction, Survival, Observed, at_risk, failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI", "'-d_p', default='./data/patch_prognostic', type=str, help='data path') parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use cuda') parser.add_argument('--lr', '-lr',", "ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss =", "== 'm': print('use MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, 
momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer", "summary_valid = {'loss': float('inf'), 'acc': 0} summary_writer = SummaryWriter(log_path) loss_valid_best = float('inf') for", "1 and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group - 1] = drop_probs / 4.0 ** (4", "O.reshape((N, 1)), failures, ties) print(\"loss:\", loss.item()) Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival,", "optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4) if args.optimizer == 's': print('use SGD')", "'-eid', default='0', help='experiment id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint", "lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'Adadelta': print('use Adadelta') optimizer = torch.optim.Adadelta(net.parameters(),", "if epoch >= 20: lr /= 10 if epoch >= 40: lr /=", "type=float, help='drop prob') parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional layer parameters') parser.add_argument('--type-key', '-type-key', default='tumor',", "val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, drop_last=True, shuffle=True) valid_dataloader", "parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path to load') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')", "parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use cuda') parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning rate') parser.add_argument('--momentum',", "ckpt_path\" checkpoint = torch.load(args.ckpt) pretrained_dict = 
checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return net def adjust_learning_rate(optimizer, epoch,", "CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() return summary d_pth = args.data_path sp", "epoch, idx, len(dataloader)) img = img.to(device) output = net(img) output, T, O, at_risk,", "epoch) print('valid/CI', summary_valid['CI'], epoch) if summary_valid['loss'] < loss_valid_best: loss_vd_best = summary_valid['loss'] torch.save({'epoch': summary_train['epoch'],", "+ \"_\" + str(args.experiment_id)) if not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name +", "= adjust_learning_rate(optimizer, epoch, idx, len(dataloader)) img = img.to(device) output = net(img) output, T,", "1 == 0: torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()}, (sp + '/' + str(epoch) +", "'-nw', default='2', type=int, help='num_worker') parser.add_argument('--start', '-s', default='0', type=int, help='start epoch') parser.add_argument('--end', '-e', default='10000',", "return net def adjust_learning_rate(optimizer, epoch, step, len_epoch): \"\"\"decrease the learning rate at 200", "< 1 or block_group > 4: raise ValueError( 'drop_group should be a comma", "args.optimizer == 'Adadelta': print('use Adadelta') optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4) net", "train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch) if epoch %", "summary_train['epoch'], 'state_dict': net.state_dict()}, (sp + '/' + str(epoch) + '.ckpt')) summary_valid = valid(valid_dataloader,", "+ step + epoch * len_epoch) / (5. 
* len_epoch) print('epoch = {},", "len(train_dataloader)) summary_train = {'epoch': 0, 'fp': 0, 'tp': 0, 'Neg': 0, 'Pos': 0}", "parser.add_argument('--experimentway', '-eway', default='prognosis', type=str, help='prognosis or replase') parser.add_argument('--use_std', '-std', default='use', type=str, help='use std", "MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device) for param in net.fc.parameters(): param.requires_grad = True optimizer = torch.optim.SGD(filter(lambda", "ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() return summary", "0} summary_writer = SummaryWriter(log_path) loss_valid_best = float('inf') for epoch in range(args.start, args.end): summary_train", "output = net(img) output, T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T,", "weight_decay=1e-4, nesterov=True) if args.optimizer == 'Adadelta': print('use Adadelta') optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9,", "T.to(device) O = O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) loss.register_hook(lambda", "concordance_index from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD parser = argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path', '-d_p',", "parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD momentum') parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch size') parser.add_argument('--num_worker',", "= args.drop_prob drop_group = [int(x) for x in args.drop_group.split(',')] for block_group in drop_group:", "args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device", "in enumerate(dataloader): N = O.shape[0] print('T:', T) print('O:', O) img = img.to(device) output", "/ 4.0 ** (4 - block_group) if 
args.freeze: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device)", "type=str, help='comma separated indices of GPU to use,' ' e.g. 0,1 for using", "lr = args.lr if epoch >= 20: lr /= 10 if epoch >=", "adam') optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4) if args.optimizer == 's': print('use", "torch.nn.DataParallel(net, device_ids=None) if args.resume: net = load_checkpoint(args, net) def train(epoch, dataloader, summary): loss_sum", "loss.item() summary['CI'] = CI.item() return summary d_pth = args.data_path sp = ckpt_path_save +", "/= 10 if epoch >= 80: lr /= 10 '''warmup''' if epoch <", "= ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train,", "0.99), weight_decay=1e-4) if args.optimizer == 's': print('use SGD') optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr,", "parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated indices of GPU to use,' ' e.g. 
0,1", "0: continue N = O.shape[0] print('T:', T) print('O:', O) if args.optimizer != 'Adadelta':", "= loss.item() summary['CI'] = CI.item() summary['lr'] = optimizer.param_groups[0]['lr'] return summary def valid(dataloader, summary):", "log_path = os.path.join(args.log_path, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save", "= os.path.join(args.log_path, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save =", "concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\", loss.item(), \"CI:\",", "epoch) if epoch % 1 == 0: torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()}, (sp +", "parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop prob') parser.add_argument('--freeze', '-f',", "') parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or", "= SurvivalAnalysis() def load_checkpoint(args, net): print(\"Use ckpt: \", args.ckpt) assert len(args.ckpt) != 0,", "0,1 for using GPU_0' ' and GPU_1, default 0.') parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop", "num_GPU print(\"batch_size:\",batch_size_train) num_workers = args.num_worker * num_GPU SA = SurvivalAnalysis() def load_checkpoint(args, net):", "GPU_0' ' and GPU_1, default 0.') parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups') parser.add_argument('--drop_prob', '-drop_prob',", "T, O) T = T.to(device) O = O.to(device) loss = cox_cost(output, at_risk, O.reshape((N,", "args.experiment_name + \"_\" 
+ str(args.experiment_id)) if not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name", "valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader)) summary_train = {'epoch': 0,", "= float('inf') for epoch in range(args.start, args.end): summary_train = train(epoch, train_dataloader, summary_train) summary_writer.add_scalar(", "SummaryWriter import torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from utils.Survival_Aanlysis import SurvivalAnalysis from", "m(MinimalLaycaSGD) ' 'or l(LaycaSGD)') args = parser.parse_args() cudnn.benchmark = True log_path = os.path.join(args.log_path,", "= SA.calc_at_risk(output, T, O) T = T.to(device) O = O.to(device) loss = cox_cost(output,", "!= 'Adadelta': lr = adjust_learning_rate(optimizer, epoch, idx, len(dataloader)) img = img.to(device) output =", "os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device = torch.device(\"cuda\" if args.use_cuda else \"cpu\") num_GPU =", "action='store_true', help='resume from checkpoint') parser.add_argument('--way', '-way', default='10', type=str, help='train way, 40 10 or", "= args.batch_size * num_GPU print(\"batch_size:\",batch_size_train) num_workers = args.num_worker * num_GPU SA = SurvivalAnalysis()", "import SummaryWriter import torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from utils.Survival_Aanlysis import SurvivalAnalysis", "torch.Tensor().to(device) for idx, (img, T, O, _, count) in enumerate(dataloader): if O.sum() ==", "help='Freeze convolutional layer parameters') parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor or tumor_beside or fibrous_tissue')", "net = torch.nn.DataParallel(net, device_ids=None) if args.resume: 
net = load_checkpoint(args, net) def train(epoch, dataloader,", "* num_GPU SA = SurvivalAnalysis() def load_checkpoint(args, net): print(\"Use ckpt: \", args.ckpt) assert", "ties) print(\"loss:\", loss.item()) Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed =", "% 1 == 0: torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()}, (sp + '/' + str(epoch)", "weight_decay=1e-4, nesterov=True) if args.optimizer == 'm': print('use MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9,", "if args.resume: net = load_checkpoint(args, net) def train(epoch, dataloader, summary): loss_sum = 0", "/= 10 '''warmup''' if epoch < 5: lr = lr * float(1 +", "O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) T = T.to(device) O", "param_group in optimizer.param_groups: param_group['lr'] = lr return lr drop_prob = [0.] * 4", "O, _, count) in enumerate(dataloader): N = O.shape[0] print('T:', T) print('O:', O) img", "O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) print(\"loss:\", loss.item()) Prediction =", "os import sys import torch import torch.utils.data from tensorboardX import SummaryWriter import torch.backends.cudnn", "optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float()))", "default='0.1', type=float, help='drop prob') parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional layer parameters') parser.add_argument('--type-key', '-type-key',", "= SA.calc_at_risk(output, T, O) print('ties:', ties) T = T.to(device) O = O.to(device) loss", "summary['lr'] = optimizer.param_groups[0]['lr'] return summary def valid(dataloader, summary): net.eval() length = len(dataloader) Prediction", "net.state_dict()}, (sp + '/' + str(epoch) + '.ckpt')) summary_valid = 
valid(valid_dataloader, summary_valid) summary_writer.add_scalar(", "drop_probs / 4.0 ** (4 - block_group) if args.freeze: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob,", "str(args.experiment_id)) if not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name + \"_\" + str(args.experiment_id))", "type_key=args.type_key, ExperimentWay=args.experimentway) valid_data = ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader", "ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers,", "\"\" length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device)", "SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device),", "\"_\" + str(args.experiment_id)) if not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name + \"_\"", "groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop prob') parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional layer", "import concordance_index from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD parser = argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path',", "rho=0.9, eps=1e-06, weight_decay=1e-4) net = torch.nn.DataParallel(net, device_ids=None) if 
args.resume: net = load_checkpoint(args, net)", "torch.device(\"cuda\" if args.use_cuda else \"cpu\") num_GPU = len(args.device_ids.split(',')) batch_size_train = args.batch_size * num_GPU", "torch.cat((Survival, T.float())) Observed = torch.cat((Observed, O.float())) Prediction, Survival, Observed, at_risk, failures, ties, _", "default='5', type=int, help='batch size') parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker') parser.add_argument('--start', '-s', default='0', type=int,", "len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) for idx, (img,", "print('epoch = {}, step = {}, lr = {}'.format(epoch, step, lr)) elif step", ") print('train/loss', summary_train['loss'], epoch) print('train/CI', summary_train['CI'], epoch) print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'], epoch)", "= concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\", loss.item(),", "in enumerate(dataloader): if O.sum() == 0: continue N = O.shape[0] print('T:', T) print('O:',", "Prediction, Survival, Observed, at_risk, failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI =", "parser.add_argument('--end', '-e', default='10000', type=int, help='end epoch') parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id') parser.add_argument('--experiment_name', '-name',", "N = O.shape[0] print('T:', T) print('O:', O) img = img.to(device) output = net(img)", "parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch size') parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker') parser.add_argument('--start', '-s',", "action='store_true', help='Freeze convolutional layer 
parameters') parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor or tumor_beside or", "batch_size_valid = args.batch_size * num_GPU print(\"batch_size:\",batch_size_train) num_workers = args.num_worker * num_GPU SA =", "help='valid tensor path to load') parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4',", "default='0', help='experiment id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path", "l(LaycaSGD)') args = parser.parse_args() cudnn.benchmark = True log_path = os.path.join(args.log_path, args.experiment_name + \"_\"", "or block_group > 4: raise ValueError( 'drop_group should be a comma separated list", "= net(img) output, T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O)", "print(\"length:\", len(train_dataloader)) summary_train = {'epoch': 0, 'fp': 0, 'tp': 0, 'Neg': 0, 'Pos':", "O) if args.optimizer != 'Adadelta': lr = adjust_learning_rate(optimizer, epoch, idx, len(dataloader)) img =", "'Pos': 0} summary_valid = {'loss': float('inf'), 'acc': 0} summary_writer = SummaryWriter(log_path) loss_valid_best =", "args.device_ids device = torch.device(\"cuda\" if args.use_cuda else \"cpu\") num_GPU = len(args.device_ids.split(',')) batch_size_train =", "print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() summary['lr'] = optimizer.param_groups[0]['lr']", "parser.add_argument('--log_path', '-lp', default='./log/', help='log path to save') parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path to", "'-lp', default='./log/', help='log path to save') parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path to load')", "help='use std as feature, u:use, o:only, n:not use ') 
parser.add_argument('--optimizer', '-o', default='a', type=str,", "= len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) with torch.no_grad():", "5: lr = lr * float(1 + step + epoch * len_epoch) /", "= torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4) if args.optimizer == 'l': print('use LaycaSGD') optimizer =", "tensor path to load') parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str,", "epoch % 1 == 0: torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()}, (sp + '/' +", "default='./', help='checkpoint path to load') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--way', '-way',", "summary['CI'] = CI.item() summary['lr'] = optimizer.param_groups[0]['lr'] return summary def valid(dataloader, summary): net.eval() length", "SGD') optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4) if args.optimizer == 'l': print('use LaycaSGD')", "= args.num_worker * num_GPU SA = SurvivalAnalysis() def load_checkpoint(args, net): print(\"Use ckpt: \",", "= \"\" length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed =", "Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed = torch.cat((Observed, O.float())) Prediction,", "utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD parser = argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str,", "if block_group < 1 or block_group > 4: raise ValueError( 'drop_group should be", "CI.item() summary['lr'] = optimizer.param_groups[0]['lr'] return summary def valid(dataloader, summary): net.eval() length = len(dataloader)", "0 
net.train() pth = \"\" length = len(dataloader) Prediction = torch.Tensor().to(device) Survival =", "help='start epoch') parser.add_argument('--end', '-e', default='10000', type=int, help='end epoch') parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id')", "print('T:', T) print('O:', O) if args.optimizer != 'Adadelta': lr = adjust_learning_rate(optimizer, epoch, idx,", "p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2) else: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer == 'a':", "ImageDataset from Prognostic.model import MODELS from lifelines.utils import concordance_index from utils.LaycaOptimizer import MinimalLaycaSGD,", "= cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item()", "batch_size=batch_size_train, num_workers=num_workers, drop_last=True, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader))", "= [int(x) for x in args.drop_group.split(',')] for block_group in drop_group: if block_group <", "MODELS from lifelines.utils import concordance_index from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD parser = argparse.ArgumentParser(description='Predicting", "- block_group) if args.freeze: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device) for param in net.fc.parameters():", "summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch ) print('train/loss', summary_train['loss'], epoch) print('train/CI', summary_train['CI'], epoch) print('valid/loss', float(summary_valid['loss']),", "if args.optimizer == 'l': print('use LaycaSGD') optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True)", 
"Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) with torch.no_grad(): for idx,", "if epoch >= 80: lr /= 10 '''warmup''' if epoch < 5: lr", "help='num_worker') parser.add_argument('--start', '-s', default='0', type=int, help='start epoch') parser.add_argument('--end', '-e', default='10000', type=int, help='end epoch')", "'/../../') from utils.Survival_Aanlysis import SurvivalAnalysis from utils.RiskLayer import cox_cost from Prognostic.data.image_producer import ImageDataset", "import cox_cost from Prognostic.data.image_producer import ImageDataset from Prognostic.model import MODELS from lifelines.utils import", "(img, T, O, _, count) in enumerate(dataloader): N = O.shape[0] print('T:', T) print('O:',", "= torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4) net = torch.nn.DataParallel(net, device_ids=None) if args.resume: net", "ties) loss.register_hook(lambda g: print(g)) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction = torch.cat((Prediction, output))", "print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, drop_last=True, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers,", "parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path to save') parser.add_argument('--log_path',", "CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() summary['lr'] = optimizer.param_groups[0]['lr'] return summary def", "batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader)) summary_train = {'epoch': 0, 'fp': 0, 'tp':", "prob') 
parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional layer parameters') parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor", "ckpt: \", args.ckpt) assert len(args.ckpt) != 0, \"Please input a valid ckpt_path\" checkpoint", "if args.optimizer == 's': print('use SGD') optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4) if", "args.data_path sp = ckpt_path_save + '/' + str(args.way) if not os.path.exists(sp): os.mkdir(sp) print(d_pth)", "epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch) summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch ) print('train/loss', summary_train['loss'], epoch)", "'-name', default='prognostic_res_101_mixup', help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path to save') parser.add_argument('--log_path', '-lp',", "feature, u:use, o:only, n:not use ') parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose optimizer:a(adam), s(sgd),", "== 's': print('use SGD') optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4) if args.optimizer ==", "net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device) for param in net.fc.parameters(): param.requires_grad = True optimizer", "summary d_pth = args.data_path sp = ckpt_path_save + '/' + str(args.way) if not", "'state_dict': net.state_dict()}, (sp + '/' + str(epoch) + '.ckpt')) summary_valid = valid(valid_dataloader, summary_valid)", "'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch) if epoch % 1 == 0:", "' 'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or l(LaycaSGD)') args = parser.parse_args() cudnn.benchmark = True log_path", "print('use MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, 
weight_decay=1e-4, nesterov=True) if args.optimizer == 'Adadelta':", "+ epoch * len_epoch) / (5. * len_epoch) print('epoch = {}, step =", "size') parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker') parser.add_argument('--start', '-s', default='0', type=int, help='start epoch') parser.add_argument('--end',", "optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or l(LaycaSGD)') args = parser.parse_args() cudnn.benchmark =", "Prognostic.model import MODELS from lifelines.utils import concordance_index from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD parser", "failures, ties) loss.register_hook(lambda g: print(g)) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction = torch.cat((Prediction,", "help='checkpoint path to save') parser.add_argument('--log_path', '-lp', default='./log/', help='log path to save') parser.add_argument('--ckpt', '-ckpt',", "load_checkpoint(args, net) def train(epoch, dataloader, summary): loss_sum = 0 acc_sum = 0 net.train()", "0.') parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop prob') parser.add_argument('--freeze',", "= torch.device(\"cuda\" if args.use_cuda else \"cpu\") num_GPU = len(args.device_ids.split(',')) batch_size_train = args.batch_size *", "O.shape[0] print('T:', T) print('O:', O) img = img.to(device) output = net(img) output, T,", "Survival, Observed, at_risk, failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(),", "default='use', type=str, help='use std as feature, u:use, o:only, n:not use ') parser.add_argument('--optimizer', '-o',", "= args.device_ids device = torch.device(\"cuda\" if args.use_cuda else \"cpu\") num_GPU = len(args.device_ids.split(',')) batch_size_train", "= lr return lr drop_prob 
= [0.] * 4 if args.drop_group: drop_probs =", "loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] =", "torch.load(args.ckpt) pretrained_dict = checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return net def adjust_learning_rate(optimizer, epoch, step, len_epoch): \"\"\"decrease", "parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop prob') parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional layer parameters')", "parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path to save') parser.add_argument('--log_path', '-lp', default='./log/', help='log path to", "help='tumor or tumor_beside or fibrous_tissue') parser.add_argument('--experimentway', '-eway', default='prognosis', type=str, help='prognosis or replase') parser.add_argument('--use_std',", "{}, lr={}'.format(epoch, lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr return lr drop_prob", "= LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'm': print('use MinimalLaycaSGD') optimizer", "net.eval() length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device)", "sp = ckpt_path_save + '/' + str(args.way) if not os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data", "(4 - block_group) if args.freeze: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device) for param in", "at_risk, failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy())", "Observed = torch.cat((Observed, O.float())) Prediction, Survival, Observed, at_risk, failures, ties, _ = 
SA.calc_at_risk(Prediction,", "default='0,1,2,3,4', type=str, help='comma separated indices of GPU to use,' ' e.g. 0,1 for", "= torch.load(args.ckpt) pretrained_dict = checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return net def adjust_learning_rate(optimizer, epoch, step, len_epoch):", "epoch >= 20: lr /= 10 if epoch >= 40: lr /= 10", "in drop_group: if block_group < 1 or block_group > 4: raise ValueError( 'drop_group", "= SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss = cox_cost(Prediction, at_risk,", "sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from utils.Survival_Aanlysis import SurvivalAnalysis from utils.RiskLayer import cox_cost from Prognostic.data.image_producer", "torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed =", "T.to(device) O = O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) print(\"loss:\",", "Observed, at_risk, failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(),", "train_data = ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) valid_data = ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False,", "default='tumor', type=str, help='tumor or tumor_beside or fibrous_tissue') parser.add_argument('--experimentway', '-eway', default='prognosis', type=str, help='prognosis or", "of integers' 'between 1 and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group - 1] = drop_probs /", "/= 10 if epoch >= 40: lr /= 10 if epoch >= 80:", "{}, lr = {}'.format(epoch, step, lr)) elif step == 0: print('epoch = {},", "1)), failures, ties) 
loss.register_hook(lambda g: print(g)) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction =", "enumerate(dataloader): if O.sum() == 0: continue N = O.shape[0] print('T:', T) print('O:', O)", "import os import sys import torch import torch.utils.data from tensorboardX import SummaryWriter import", "import argparse import os import sys import torch import torch.utils.data from tensorboardX import", "loss.register_hook(lambda g: print(g)) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction = torch.cat((Prediction, output)) Survival", "loss.item()) Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed = torch.cat((Observed, O.float()))", "default='prognostic_res_101_mixup', help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path to save') parser.add_argument('--log_path', '-lp', default='./log/',", "= torch.Tensor().to(device) Observed = torch.Tensor().to(device) with torch.no_grad(): for idx, (img, T, O, _,", "drop_last=True, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader)) summary_train =", "type=str, help='prognosis or replase') parser.add_argument('--use_std', '-std', default='use', type=str, help='use std as feature, u:use,", "lr /= 10 if epoch >= 40: lr /= 10 if epoch >=", "sys import torch import torch.utils.data from tensorboardX import SummaryWriter import torch.backends.cudnn as cudnn", "\", args.ckpt) assert len(args.ckpt) != 0, \"Please input a valid ckpt_path\" checkpoint =", "failures, ties, _ = SA.calc_at_risk(output, T, O) print('ties:', ties) T = T.to(device) O", "torch.Tensor().to(device) Observed = torch.Tensor().to(device) for idx, (img, T, O, _, count) in 
enumerate(dataloader):", "std as feature, u:use, o:only, n:not use ') parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose", "' e.g. 0,1 for using GPU_0' ' and GPU_1, default 0.') parser.add_argument('--drop_group', '-drop_group',", "block_group) if args.freeze: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device) for param in net.fc.parameters(): param.requires_grad", "ties) T = T.to(device) O = O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)),", "= torch.nn.DataParallel(net, device_ids=None) if args.resume: net = load_checkpoint(args, net) def train(epoch, dataloader, summary):", "ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"]", "param_group['lr'] = lr return lr drop_prob = [0.] * 4 if args.drop_group: drop_probs", "10 or combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor path to load') parser.add_argument('--load_pth_valid', '-lpth_v',", "Survival.cpu(), Observed.cpu()) CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures,", "'-ckpt', default='./', help='checkpoint path to load') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--way',", "def train(epoch, dataloader, summary): loss_sum = 0 acc_sum = 0 net.train() pth =", "+ '.ckpt')) summary_valid = valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss', summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'],", "4.0 ** (4 - block_group) if args.freeze: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, 
require_grad=False).to(device) for", "net(img) output, T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) print('ties:',", "loss_sum = 0 acc_sum = 0 net.train() pth = \"\" length = len(dataloader)", "= {'epoch': 0, 'fp': 0, 'tp': 0, 'Neg': 0, 'Pos': 0} summary_valid =", "summary_valid['CI'], epoch) summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch ) print('train/loss', summary_train['loss'], epoch) print('train/CI', summary_train['CI'], epoch)", "device = torch.device(\"cuda\" if args.use_cuda else \"cpu\") num_GPU = len(args.device_ids.split(',')) batch_size_train = args.batch_size", "200 and 300 epoch\"\"\" lr = args.lr if epoch >= 20: lr /=", "'drop_group should be a comma separated list of integers' 'between 1 and 4(drop_group:{}).'.format(args.drop_group)", "num_workers = args.num_worker * num_GPU SA = SurvivalAnalysis() def load_checkpoint(args, net): print(\"Use ckpt:", "10 if epoch >= 80: lr /= 10 '''warmup''' if epoch < 5:", "for epoch in range(args.start, args.end): summary_train = train(epoch, train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'],", "= lr * float(1 + step + epoch * len_epoch) / (5. 
*", "'or l(LaycaSGD)') args = parser.parse_args() cudnn.benchmark = True log_path = os.path.join(args.log_path, args.experiment_name +", "os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device = torch.device(\"cuda\" if args.use_cuda else \"cpu\") num_GPU", "== 'Adadelta': print('use Adadelta') optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4) net =", "print('train/loss', summary_train['loss'], epoch) print('train/CI', summary_train['CI'], epoch) print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'], epoch) if", "def adjust_learning_rate(optimizer, epoch, step, len_epoch): \"\"\"decrease the learning rate at 200 and 300", "T) print('O:', O) if args.optimizer != 'Adadelta': lr = adjust_learning_rate(optimizer, epoch, idx, len(dataloader))", "loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) loss.register_hook(lambda g: print(g)) optimizer.zero_grad() loss.backward()", "type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, drop_last=True, shuffle=True) valid_dataloader =", "id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path to save')", "args.drop_prob drop_group = [int(x) for x in args.drop_group.split(',')] for block_group in drop_group: if", "summary_train['CI'], epoch) print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'], epoch) if summary_valid['loss'] < loss_valid_best: loss_vd_best", "print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() return summary d_pth", "step == 0: 
print('epoch = {}, lr={}'.format(epoch, lr)) for param_group in optimizer.param_groups: param_group['lr']", "lr return lr drop_prob = [0.] * 4 if args.drop_group: drop_probs = args.drop_prob", "momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'Adadelta': print('use Adadelta') optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr,", "indices of GPU to use,' ' e.g. 0,1 for using GPU_0' ' and", "= torch.Tensor().to(device) for idx, (img, T, O, _, count) in enumerate(dataloader): if O.sum()", "= O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) loss.register_hook(lambda g: print(g))", "args = parser.parse_args() cudnn.benchmark = True log_path = os.path.join(args.log_path, args.experiment_name + \"_\" +", "= 0 acc_sum = 0 net.train() pth = \"\" length = len(dataloader) Prediction", "type=int, help='batch size') parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker') parser.add_argument('--start', '-s', default='0', type=int, help='start", "p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2) else: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer ==", "'-lr', default='1e-4', type=float, help='learning rate') parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD momentum') parser.add_argument('--batch_size', '-b',", "* num_GPU batch_size_valid = args.batch_size * num_GPU print(\"batch_size:\",batch_size_train) num_workers = args.num_worker * num_GPU", "from checkpoint') parser.add_argument('--way', '-way', default='10', type=str, help='train way, 40 10 or combinate') parser.add_argument('--load_pth_train',", "print('train/CI', summary_train['CI'], epoch) print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'], epoch) if summary_valid['loss'] < loss_valid_best:", "checkpoint') parser.add_argument('--way', '-way', default='10', type=str, help='train way, 40 10 or 
combinate') parser.add_argument('--load_pth_train', '-lpth_t',", "\"_\" + str(args.experiment_id)) if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device = torch.device(\"cuda\"", "idx, (img, T, O, _, count) in enumerate(dataloader): if O.sum() == 0: continue", "weight_decay=1e-2) else: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer == 'a': print('use adam') optimizer", "torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4) if args.optimizer == 'l': print('use LaycaSGD') optimizer = LaycaSGD(net.parameters(),", "or replase') parser.add_argument('--use_std', '-std', default='use', type=str, help='use std as feature, u:use, o:only, n:not", "epoch) print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'], epoch) if summary_valid['loss'] < loss_valid_best: loss_vd_best =", "shuffle=False) print(\"length:\", len(train_dataloader)) summary_train = {'epoch': 0, 'fp': 0, 'tp': 0, 'Neg': 0,", "summary['loss'] = loss.item() summary['CI'] = CI.item() summary['lr'] = optimizer.param_groups[0]['lr'] return summary def valid(dataloader,", "\"cpu\") num_GPU = len(args.device_ids.split(',')) batch_size_train = args.batch_size * num_GPU batch_size_valid = args.batch_size *", "failures, ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] = CI.item() summary['lr']", "for block_group in drop_group: if block_group < 1 or block_group > 4: raise", "step, lr)) elif step == 0: print('epoch = {}, lr={}'.format(epoch, lr)) for param_group", "parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated indices of", "load') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--way', '-way', 
default='10', type=str, help='train way,", "4 if args.drop_group: drop_probs = args.drop_prob drop_group = [int(x) for x in args.drop_group.split(',')]", "T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O) print('ties:', ties) T", "train(epoch, dataloader, summary): loss_sum = 0 acc_sum = 0 net.train() pth = \"\"", "summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch) if epoch % 1 == 0: torch.save({'epoch':", "optimizer.param_groups[0]['lr'] return summary def valid(dataloader, summary): net.eval() length = len(dataloader) Prediction = torch.Tensor().to(device)", "optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4) net = torch.nn.DataParallel(net, device_ids=None) if args.resume:", "lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4) net = torch.nn.DataParallel(net, device_ids=None) if args.resume: net = load_checkpoint(args,", "float(summary_valid['loss']), epoch) print('valid/CI', summary_valid['CI'], epoch) if summary_valid['loss'] < loss_valid_best: loss_vd_best = summary_valid['loss'] torch.save({'epoch':", "print('O:', O) img = img.to(device) output = net(img) output, T, O, at_risk, failures,", "torch.utils.data from tensorboardX import SummaryWriter import torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from", "import ImageDataset from Prognostic.model import MODELS from lifelines.utils import concordance_index from utils.LaycaOptimizer import", "help='drop prob') parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional layer parameters') parser.add_argument('--type-key', '-type-key', default='tumor', type=str,", "type=str, help='tumor or tumor_beside or fibrous_tissue') parser.add_argument('--experimentway', '-eway', default='prognosis', type=str, help='prognosis or replase')", "tumor_beside or fibrous_tissue') parser.add_argument('--experimentway', '-eway', 
default='prognosis', type=str, help='prognosis or replase') parser.add_argument('--use_std', '-std', default='use',", "optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'm': print('use MinimalLaycaSGD')", "if epoch % 1 == 0: torch.save({'epoch': summary_train['epoch'], 'state_dict': net.state_dict()}, (sp + '/'", "combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor path to load') parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid", ") drop_prob[block_group - 1] = drop_probs / 4.0 ** (4 - block_group) if", "= torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2) else: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if", "at_risk, O.reshape((N, 1)), failures, ties) loss.register_hook(lambda g: print(g)) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step()", "length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) with", "summary_writer.add_scalar( 'valid/loss', summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch) summary_writer.add_scalar( 'learning_rate', summary_train['lr'], epoch )", "loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) print(\"loss:\", loss.item()) Prediction = torch.cat((Prediction,", "epoch, step, len_epoch): \"\"\"decrease the learning rate at 200 and 300 epoch\"\"\" lr", "lr * float(1 + step + epoch * len_epoch) / (5. 
* len_epoch)", "help='prognosis or replase') parser.add_argument('--use_std', '-std', default='use', type=str, help='use std as feature, u:use, o:only,", "!= 0, \"Please input a valid ckpt_path\" checkpoint = torch.load(args.ckpt) pretrained_dict = checkpoint['state_dict']", "for x in args.drop_group.split(',')] for block_group in drop_group: if block_group < 1 or", "ties, _ = SA.calc_at_risk(output, T, O) T = T.to(device) O = O.to(device) loss", "CI.item() return summary d_pth = args.data_path sp = ckpt_path_save + '/' + str(args.way)", "args.optimizer == 'l': print('use LaycaSGD') optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if", "default='1.0', type=float, help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated indices of GPU to", "'-type-key', default='tumor', type=str, help='tumor or tumor_beside or fibrous_tissue') parser.add_argument('--experimentway', '-eway', default='prognosis', type=str, help='prognosis", "epoch\"\"\" lr = args.lr if epoch >= 20: lr /= 10 if epoch", "parser.parse_args() cudnn.benchmark = True log_path = os.path.join(args.log_path, args.experiment_name + \"_\" + str(args.experiment_id)) if", "print('use adam') optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4) if args.optimizer == 's':", "_ = SA.calc_at_risk(output, T, O) print('ties:', ties) T = T.to(device) O = O.to(device)", "load') parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated indices", "Observed = torch.Tensor().to(device) for idx, (img, T, O, _, count) in enumerate(dataloader): if", "len(args.device_ids.split(',')) batch_size_train = args.batch_size * num_GPU batch_size_valid = args.batch_size * num_GPU print(\"batch_size:\",batch_size_train) num_workers", "os.mkdir(log_path) ckpt_path_save = 
os.path.join(args.ckpt_path_save, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save)", "survival time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str, help='data path') parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use", "= O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) print(\"loss:\", loss.item()) Prediction", "if args.use_cuda else \"cpu\") num_GPU = len(args.device_ids.split(',')) batch_size_train = args.batch_size * num_GPU batch_size_valid", "num_GPU = len(args.device_ids.split(',')) batch_size_train = args.batch_size * num_GPU batch_size_valid = args.batch_size * num_GPU", "lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4) if args.optimizer == 's': print('use SGD') optimizer = torch.optim.SGD(net.parameters(),", "import SurvivalAnalysis from utils.RiskLayer import cox_cost from Prognostic.data.image_producer import ImageDataset from Prognostic.model import", "ties, _ = SA.calc_at_risk(output, T, O) print('ties:', ties) T = T.to(device) O =", "'/' + str(args.way) if not os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data = ImageDataset(d_pth, factor=args.way, val=False,", "'learning_rate', summary_train['lr'], epoch ) print('train/loss', summary_train['loss'], epoch) print('train/CI', summary_train['CI'], epoch) print('valid/loss', float(summary_valid['loss']), epoch)", "len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) with torch.no_grad(): for", "using GPU_0' ' and GPU_1, default 0.') parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups') parser.add_argument('--drop_prob',", "GPU_1, default 0.') parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop", 
"drop_probs = args.drop_prob drop_group = [int(x) for x in args.drop_group.split(',')] for block_group in", "way, 40 10 or combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor path to load')", "and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group - 1] = drop_probs / 4.0 ** (4 -", "= True optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2) else: net =", "weight_decay=5e-4) if args.optimizer == 'l': print('use LaycaSGD') optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4,", "str(epoch) + '.ckpt')) summary_valid = valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss', summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI',", "lr=args.lr, weight_decay=5e-4) if args.optimizer == 'l': print('use LaycaSGD') optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9,", "separated list of integers' 'between 1 and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group - 1] =", "= torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed = torch.cat((Observed, O.float())) Prediction, Survival,", "from utils.RiskLayer import cox_cost from Prognostic.data.image_producer import ImageDataset from Prognostic.model import MODELS from", "to use,' ' e.g. 
0,1 for using GPU_0' ' and GPU_1, default 0.')", "use ') parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD) '", "summary_train['lr'], epoch ) print('train/loss', summary_train['loss'], epoch) print('train/CI', summary_train['CI'], epoch) print('valid/loss', float(summary_valid['loss']), epoch) print('valid/CI',", "if epoch < 5: lr = lr * float(1 + step + epoch", "'tp': 0, 'Neg': 0, 'Pos': 0} summary_valid = {'loss': float('inf'), 'acc': 0} summary_writer", "optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'Adadelta': print('use Adadelta')", "path to save') parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path to load') parser.add_argument('--resume', '-r', action='store_true',", "parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning rate') parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD momentum') parser.add_argument('--batch_size',", "drop_prob=drop_prob).to(device) if args.optimizer == 'a': print('use adam') optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99),", "checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return net def adjust_learning_rate(optimizer, epoch, step, len_epoch): \"\"\"decrease the learning rate", "torch.cat((Observed, O.float())) Prediction, Survival, Observed, at_risk, failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu())", "summary['CI'] = CI.item() return summary d_pth = args.data_path sp = ckpt_path_save + '/'", "train(epoch, train_dataloader, summary_train) summary_writer.add_scalar( 'train/loss', summary_train['loss'], epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch) if epoch", "help='comma separated indices of GPU to use,' ' e.g. 
0,1 for using GPU_0'", "True log_path = os.path.join(args.log_path, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.isdir(log_path): os.mkdir(log_path)", "* 4 if args.drop_group: drop_probs = args.drop_prob drop_group = [int(x) for x in", "type=int, help='end epoch') parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name')", "lr /= 10 '''warmup''' if epoch < 5: lr = lr * float(1", "T = T.to(device) O = O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures,", "require_grad=False).to(device) for param in net.fc.parameters(): param.requires_grad = True optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,", "torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4) if args.optimizer == 's': print('use SGD') optimizer =", "'fp': 0, 'tp': 0, 'Neg': 0, 'Pos': 0} summary_valid = {'loss': float('inf'), 'acc':", "summary_valid = valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss', summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch) summary_writer.add_scalar(", "type=float, help='SGD momentum') parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch size') parser.add_argument('--num_worker', '-nw', default='2', type=int,", "help='use cuda') parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning rate') parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD", "g: print(g)) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5) optimizer.step() Prediction = torch.cat((Prediction, output)) Survival =", "in args.drop_group.split(',')] for block_group in drop_group: if block_group < 1 or block_group >", "= torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = 
torch.Tensor().to(device) for idx, (img, T, O,", "'.ckpt')) summary_valid = valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss', summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch)", ">= 40: lr /= 10 if epoch >= 80: lr /= 10 '''warmup'''", "torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from utils.Survival_Aanlysis import SurvivalAnalysis from utils.RiskLayer import", "print('epoch = {}, lr={}'.format(epoch, lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr return", "torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2) else: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer", "= MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'Adadelta': print('use Adadelta') optimizer", "num_workers=num_workers, drop_last=True, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader)) summary_train", "help='choose optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or l(LaycaSGD)') args = parser.parse_args() cudnn.benchmark", "epoch >= 40: lr /= 10 if epoch >= 80: lr /= 10", "= img.to(device) output = net(img) output, T, O, at_risk, failures, ties, _ =", "net = load_checkpoint(args, net) def train(epoch, dataloader, summary): loss_sum = 0 acc_sum =", "net.train() pth = \"\" length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device)", "and GPU_1, default 0.') parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups') parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float,", "O = O.to(device) loss = cox_cost(output, at_risk, 
O.reshape((N, 1)), failures, ties) print(\"loss:\", loss.item())", "args.resume: net = load_checkpoint(args, net) def train(epoch, dataloader, summary): loss_sum = 0 acc_sum", "momentum=0.9, lr=args.lr, weight_decay=5e-4) if args.optimizer == 'l': print('use LaycaSGD') optimizer = LaycaSGD(net.parameters(), lr=args.lr,", ">= 20: lr /= 10 if epoch >= 40: lr /= 10 if", "= MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer == 'a': print('use adam') optimizer = torch.optim.Adam(net.parameters(), lr=args.lr,", "drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader)) summary_train = {'epoch': 0, 'fp': 0, 'tp': 0, 'Neg':", "import torch import torch.utils.data from tensorboardX import SummaryWriter import torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__))", "num_GPU batch_size_valid = args.batch_size * num_GPU print(\"batch_size:\",batch_size_train) num_workers = args.num_worker * num_GPU SA", "-Prediction.cpu().detach().numpy(), Observed.cpu().detach().numpy()) loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\", loss.item(), \"CI:\", CI.item())", "to load') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--way', '-way', default='10', type=str, help='train", "epoch * len_epoch) / (5. 
* len_epoch) print('epoch = {}, step = {},", "num_GPU SA = SurvivalAnalysis() def load_checkpoint(args, net): print(\"Use ckpt: \", args.ckpt) assert len(args.ckpt)", "valid_data = ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data)) train_dataloader = torch.utils.data.DataLoader(train_data,", "for using GPU_0' ' and GPU_1, default 0.') parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups')", "type=bool, help='use cuda') parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning rate') parser.add_argument('--momentum', '-mom', default='0.9', type=float,", "O.shape[0] print('T:', T) print('O:', O) if args.optimizer != 'Adadelta': lr = adjust_learning_rate(optimizer, epoch,", "print('use SGD') optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4) if args.optimizer == 'l': print('use", "for idx, (img, T, O, _, count) in enumerate(dataloader): N = O.shape[0] print('T:',", "10 if epoch >= 40: lr /= 10 if epoch >= 80: lr", "argparse import os import sys import torch import torch.utils.data from tensorboardX import SummaryWriter", "= drop_probs / 4.0 ** (4 - block_group) if args.freeze: net = MODELS[('resnet50')](factor=args.way,", "lr = {}'.format(epoch, step, lr)) elif step == 0: print('epoch = {}, lr={}'.format(epoch,", "epoch) summary_writer.add_scalar( 'train/CI', summary_train['CI'], epoch) if epoch % 1 == 0: torch.save({'epoch': summary_train['epoch'],", "MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'Adadelta': print('use", "or fibrous_tissue') parser.add_argument('--experimentway', '-eway', default='prognosis', type=str, help='prognosis or replase') parser.add_argument('--use_std', '-std', default='use', type=str,", "'-way', default='10', type=str, help='train way, 40 
10 or combinate') parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train", "enumerate(dataloader): N = O.shape[0] print('T:', T) print('O:', O) img = img.to(device) output =", "torch import torch.utils.data from tensorboardX import SummaryWriter import torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) +", "+ str(args.way) if not os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data = ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key,", "for param_group in optimizer.param_groups: param_group['lr'] = lr return lr drop_prob = [0.] *", "tensorboardX import SummaryWriter import torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../') from utils.Survival_Aanlysis import", "\"\"\"decrease the learning rate at 200 and 300 epoch\"\"\" lr = args.lr if", "_ = SA.calc_at_risk(output, T, O) T = T.to(device) O = O.to(device) loss =", "len_epoch): \"\"\"decrease the learning rate at 200 and 300 epoch\"\"\" lr = args.lr", "= loss.item() summary['CI'] = CI.item() return summary d_pth = args.data_path sp = ckpt_path_save", "net.load_state_dict(pretrained_dict) return net def adjust_learning_rate(optimizer, epoch, step, len_epoch): \"\"\"decrease the learning rate at", "default='./tensor_path', help='valid tensor path to load') parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup alpha') parser.add_argument('--device_ids',", "if epoch >= 40: lr /= 10 if epoch >= 80: lr /=", "if O.sum() == 0: continue N = O.shape[0] print('T:', T) print('O:', O) if", "return summary d_pth = args.data_path sp = ckpt_path_save + '/' + str(args.way) if", "ckpt_path_save + '/' + str(args.way) if not os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data = ImageDataset(d_pth,", "* len_epoch) print('epoch = {}, step = {}, lr = {}'.format(epoch, step, lr))", "s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or l(LaycaSGD)') args = 
parser.parse_args() cudnn.benchmark = True", "** (4 - block_group) if args.freeze: net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device) for param", "'s': print('use SGD') optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4) if args.optimizer == 'l':", "{}'.format(epoch, step, lr)) elif step == 0: print('epoch = {}, lr={}'.format(epoch, lr)) for", "_, count) in enumerate(dataloader): if O.sum() == 0: continue N = O.shape[0] print('T:',", "import torch.utils.data from tensorboardX import SummaryWriter import torch.backends.cudnn as cudnn sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')", "default='0', type=int, help='start epoch') parser.add_argument('--end', '-e', default='10000', type=int, help='end epoch') parser.add_argument('--experiment_id', '-eid', default='0',", "= torch.Tensor().to(device) with torch.no_grad(): for idx, (img, T, O, _, count) in enumerate(dataloader):", "torch.Tensor().to(device) with torch.no_grad(): for idx, (img, T, O, _, count) in enumerate(dataloader): N", "type=float, help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated indices of GPU to use,'", "_, count) in enumerate(dataloader): N = O.shape[0] print('T:', T) print('O:', O) img =", "= args.data_path sp = ckpt_path_save + '/' + str(args.way) if not os.path.exists(sp): os.mkdir(sp)", "valid ckpt_path\" checkpoint = torch.load(args.ckpt) pretrained_dict = checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return net def adjust_learning_rate(optimizer,", "args.optimizer == 'm': print('use MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if", "'l': print('use LaycaSGD') optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer ==", "e.g. 
0,1 for using GPU_0' ' and GPU_1, default 0.') parser.add_argument('--drop_group', '-drop_group', default='3,4',", "cudnn.benchmark = True log_path = os.path.join(args.log_path, args.experiment_name + \"_\" + str(args.experiment_id)) if not", "SurvivalAnalysis from utils.RiskLayer import cox_cost from Prognostic.data.image_producer import ImageDataset from Prognostic.model import MODELS", "checkpoint = torch.load(args.ckpt) pretrained_dict = checkpoint['state_dict'] net.load_state_dict(pretrained_dict) return net def adjust_learning_rate(optimizer, epoch, step,", "default='./model/', help='checkpoint path to save') parser.add_argument('--log_path', '-lp', default='./log/', help='log path to save') parser.add_argument('--ckpt',", "help='SGD momentum') parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch size') parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker')", "cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) loss.register_hook(lambda g: print(g)) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(), 5)", "drop_prob = [0.] 
* 4 if args.drop_group: drop_probs = args.drop_prob drop_group = [int(x)", "torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) with torch.no_grad(): for idx, (img, T,", "{'loss': float('inf'), 'acc': 0} summary_writer = SummaryWriter(log_path) loss_valid_best = float('inf') for epoch in", "= len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) for idx,", "Adadelta') optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4) net = torch.nn.DataParallel(net, device_ids=None) if", "lifelines.utils import concordance_index from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD parser = argparse.ArgumentParser(description='Predicting survival time')", "net) def train(epoch, dataloader, summary): loss_sum = 0 acc_sum = 0 net.train() pth", "= argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str, help='data path') parser.add_argument('--use_cuda', '-use_cuda', default='True',", "to save') parser.add_argument('--log_path', '-lp', default='./log/', help='log path to save') parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint", "type=int, help='num_worker') parser.add_argument('--start', '-s', default='0', type=int, help='start epoch') parser.add_argument('--end', '-e', default='10000', type=int, help='end", "== 'l': print('use LaycaSGD') optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer", "help='mixup alpha') parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated indices of GPU to use,' '", "cuda') parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning rate') parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD momentum')", "summary): net.eval() 
length = len(dataloader) Prediction = torch.Tensor().to(device) Survival = torch.Tensor().to(device) Observed =", "print('O:', O) if args.optimizer != 'Adadelta': lr = adjust_learning_rate(optimizer, epoch, idx, len(dataloader)) img", "= [0.] * 4 if args.drop_group: drop_probs = args.drop_prob drop_group = [int(x) for", "block_group < 1 or block_group > 4: raise ValueError( 'drop_group should be a", "parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose optimizer:a(adam), s(sgd), ' 'Adadelta(Adadelta), m(MinimalLaycaSGD) ' 'or l(LaycaSGD)')", "net def adjust_learning_rate(optimizer, epoch, step, len_epoch): \"\"\"decrease the learning rate at 200 and", "O = O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) loss.register_hook(lambda g:", "ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) valid_data = ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway)", "= {}, lr={}'.format(epoch, lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr return lr", "epoch') parser.add_argument('--end', '-e', default='10000', type=int, help='end epoch') parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id') parser.add_argument('--experiment_name',", "float('inf'), 'acc': 0} summary_writer = SummaryWriter(log_path) loss_valid_best = float('inf') for epoch in range(args.start,", "momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'm': print('use MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr,", "argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str, help='data path') parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool,", "valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss', 
summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch) summary_writer.add_scalar( 'learning_rate', summary_train['lr'],", "= valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss', summary_valid['loss'], epoch) summary_writer.add_scalar( 'valid/CI', summary_valid['CI'], epoch) summary_writer.add_scalar( 'learning_rate',", "replase') parser.add_argument('--use_std', '-std', default='use', type=str, help='use std as feature, u:use, o:only, n:not use", "print('ties:', ties) T = T.to(device) O = O.to(device) loss = cox_cost(output, at_risk, O.reshape((N,", "len(dataloader)) img = img.to(device) output = net(img) output, T, O, at_risk, failures, ties,", "'-b', default='5', type=int, help='batch size') parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker') parser.add_argument('--start', '-s', default='0',", "5) optimizer.step() Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed = torch.cat((Observed,", "import sys import torch import torch.utils.data from tensorboardX import SummaryWriter import torch.backends.cudnn as", "net.fc.parameters(): param.requires_grad = True optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, weight_decay=1e-2) else:", "LaycaSGD') optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'm': print('use", "SA.calc_at_risk(output, T, O) print('ties:', ties) T = T.to(device) O = O.to(device) loss =", "loss_valid_best: loss_vd_best = summary_valid['loss'] torch.save({'epoch': summary_train['epoch'], 'optimizer': optimizer.state_dict(), 'state_dict': net.state_dict()}, os.path.join(sp, 'best.ckpt')) summary_writer.close()", "'between 1 and 4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group - 1] = drop_probs / 4.0 **", "lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if 
args.optimizer == 'm': print('use MinimalLaycaSGD') optimizer = MinimalLaycaSGD(net.parameters(),", "'Adadelta': lr = adjust_learning_rate(optimizer, epoch, idx, len(dataloader)) img = img.to(device) output = net(img)", "= torch.Tensor().to(device) Observed = torch.Tensor().to(device) for idx, (img, T, O, _, count) in", "Survival = torch.Tensor().to(device) Observed = torch.Tensor().to(device) for idx, (img, T, O, _, count)", "os.path.join(args.ckpt_path_save, args.experiment_name + \"_\" + str(args.experiment_id)) if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids", "ValueError( 'drop_group should be a comma separated list of integers' 'between 1 and", "Survival = torch.cat((Survival, T.float())) Observed = torch.cat((Observed, O.float())) Prediction, Survival, Observed, at_risk, failures,", "from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD parser = argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic',", "'/' + str(epoch) + '.ckpt')) summary_valid = valid(valid_dataloader, summary_valid) summary_writer.add_scalar( 'valid/loss', summary_valid['loss'], epoch)", "epoch < 5: lr = lr * float(1 + step + epoch *", "O, _, count) in enumerate(dataloader): if O.sum() == 0: continue N = O.shape[0]", "= cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) print(\"loss:\", loss.item()) Prediction = torch.cat((Prediction, output))", "lr)) elif step == 0: print('epoch = {}, lr={}'.format(epoch, lr)) for param_group in", "= torch.utils.data.DataLoader(train_data, batch_size=batch_size_train, num_workers=num_workers, drop_last=True, shuffle=True) valid_dataloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False)", "+ \"_\" + str(args.experiment_id)) if not os.path.exists(ckpt_path_save): os.mkdir(ckpt_path_save) 
os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device_ids device =", "len(args.ckpt) != 0, \"Please input a valid ckpt_path\" checkpoint = torch.load(args.ckpt) pretrained_dict =", "[int(x) for x in args.drop_group.split(',')] for block_group in drop_group: if block_group < 1", "type=str, help='use std as feature, u:use, o:only, n:not use ') parser.add_argument('--optimizer', '-o', default='a',", "summary def valid(dataloader, summary): net.eval() length = len(dataloader) Prediction = torch.Tensor().to(device) Survival =", "should be a comma separated list of integers' 'between 1 and 4(drop_group:{}).'.format(args.drop_group) )", "= cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties) loss.register_hook(lambda g: print(g)) optimizer.zero_grad() loss.backward() torch.nn.utils.clip_grad_norm_(net.parameters(),", "4(drop_group:{}).'.format(args.drop_group) ) drop_prob[block_group - 1] = drop_probs / 4.0 ** (4 - block_group)", "epoch') parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id') parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name') parser.add_argument('--ckpt_path_save', '-ckpt_s',", "lr drop_prob = [0.] 
* 4 if args.drop_group: drop_probs = args.drop_prob drop_group =", "print('use LaycaSGD') optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True) if args.optimizer == 'm':", "val=False, type_key=args.type_key, ExperimentWay=args.experimentway) valid_data = ImageDataset(d_pth, way=\"valid\", factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) print(len(train_data)) print(len(valid_data))", "300 epoch\"\"\" lr = args.lr if epoch >= 20: lr /= 10 if", "parser = argparse.ArgumentParser(description='Predicting survival time') parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str, help='data path') parser.add_argument('--use_cuda', '-use_cuda',", "at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss'] = loss.item() summary['CI'] =", "at 200 and 300 epoch\"\"\" lr = args.lr if epoch >= 20: lr", "= torch.utils.data.DataLoader(valid_data, batch_size=batch_size_valid, num_workers=num_workers, drop_last=False, shuffle=False) print(\"length:\", len(train_dataloader)) summary_train = {'epoch': 0, 'fp':", "from Prognostic.model import MODELS from lifelines.utils import concordance_index from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD", "torch.no_grad(): for idx, (img, T, O, _, count) in enumerate(dataloader): N = O.shape[0]", "output)) Survival = torch.cat((Survival, T.float())) Observed = torch.cat((Observed, O.float())) Prediction, Survival, Observed, at_risk,", "utils.RiskLayer import cox_cost from Prognostic.data.image_producer import ImageDataset from Prognostic.model import MODELS from lifelines.utils", "import MODELS from lifelines.utils import concordance_index from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD parser =", "device_ids=None) if args.resume: net = load_checkpoint(args, net) def train(epoch, dataloader, summary): loss_sum =", "layer parameters') 
parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor or tumor_beside or fibrous_tissue') parser.add_argument('--experimentway', '-eway',", "== 'a': print('use adam') optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4) if args.optimizer", "= len(args.device_ids.split(',')) batch_size_train = args.batch_size * num_GPU batch_size_valid = args.batch_size * num_GPU print(\"batch_size:\",batch_size_train)", "net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device) if args.optimizer == 'a': print('use adam') optimizer = torch.optim.Adam(net.parameters(),", "of GPU to use,' ' e.g. 0,1 for using GPU_0' ' and GPU_1,", "failures, ties) print(\"loss:\", loss.item()) Prediction = torch.cat((Prediction, output)) Survival = torch.cat((Survival, T.float())) Observed", "not os.path.exists(sp): os.mkdir(sp) print(d_pth) train_data = ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key, ExperimentWay=args.experimentway) valid_data =", "- 1] = drop_probs / 4.0 ** (4 - block_group) if args.freeze: net", "block_group in drop_group: if block_group < 1 or block_group > 4: raise ValueError(", "+ str(args.experiment_id)) if not os.path.isdir(log_path): os.mkdir(log_path) ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name + \"_\" +", "GPU to use,' ' e.g. 
0,1 for using GPU_0' ' and GPU_1, default", "Observed.cpu().detach().numpy()) loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties) print(\"loss:\", loss.item(), \"CI:\", CI.item()) summary['loss']", "adjust_learning_rate(optimizer, epoch, step, len_epoch): \"\"\"decrease the learning rate at 200 and 300 epoch\"\"\"", "= T.to(device) O = O.to(device) loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties)", "nesterov=True) if args.optimizer == 'Adadelta': print('use Adadelta') optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06," ]
[ "MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons", "EPILOG = \"\"\"<NAME>, SENCKENBERG Biodiversity and Climate Research Centre (BiK-F) email: <EMAIL> 2017/09/26\"\"\"", "except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create sublogger", "+ ' ' * len(header)) return str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError:", "return str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError: # both formatters should use", "' ' * len(header)) return str # optional colored console logger (nice!) try:", "rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath,", "logging from logging.handlers import RotatingFileHandler import numpy as np import sys try: #", "record): str = logging.Formatter.format(self, record) header, footer = str.split(record.message) str = str.replace('\\n', '\\n'", "class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors, record.levelname) str = logging.Formatter.format(self,", "\"\"\" def format(self, record): str = logging.Formatter.format(self, record) header, footer = str.split(record.message) str", "(nice!) 
try: import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors,", "return str # optional colored console logger (nice!) try: import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter):", "CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError: # both formatters should use the default", "%H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler()", "lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler() hCons.setFormatter(lfCons)", "len(header)) return str # optional colored console logger (nice!) try: import colorlog class", "# python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self,", "record): pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create sublogger for different scripts logPath = '.'", "= self.color(self.log_colors, record.levelname) str = logging.Formatter.format(self, record) header, footer = str.split(record.message) str =", "ImportError: # both formatters should use the default (non-color) MultiLineFormatterColor = MultiLineFormatter CONS_FORMAT", "logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create sublogger for different scripts logPath = '.' 
fileName =", "= str.replace('\\n', '\\n' + ' ' * len(header)) return str # optional colored", "str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError: # both formatters should use the", "class MultiLineFormatter(logging.Formatter): \"\"\" A custom multi-line logging formatter \"\"\" def format(self, record): str", "formatter \"\"\" def format(self, record): str = logging.Formatter.format(self, record) header, footer = str.split(record.message)", "different scripts logPath = '.' fileName = 'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\" A custom", "CONS_FORMAT = \"[%(levelname)-8s] %(message)s\" FILE_FORMAT = \"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT,", "optional colored console logger (nice!) try: import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record):", "logging.handlers import RotatingFileHandler import numpy as np import sys try: # python 2.7+", "sublogger for different scripts logPath = '.' fileName = 'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\"", "RotatingFileHandler import numpy as np import sys try: # python 2.7+ from logging", "MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s] %(message)s\" FILE_FORMAT = \"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons =", "scripts logPath = '.' fileName = 'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\" A custom multi-line", "str.split(record.message) str = str.replace('\\n', '\\n' + ' ' * len(header)) return str #", "header, footer = str.split(record.message) str = str.replace('\\n', '\\n' + ' ' * len(header))", "str # optional colored console logger (nice!) 
try: import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def", "hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG = \"\"\"<NAME>, SENCKENBERG Biodiversity", "= logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName),", "except ImportError: # both formatters should use the default (non-color) MultiLineFormatterColor = MultiLineFormatter", "import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO:", "import logging from logging.handlers import RotatingFileHandler import numpy as np import sys try:", "'.' fileName = 'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\" A custom multi-line logging formatter \"\"\"", "* len(header)) return str # optional colored console logger (nice!) 
try: import colorlog", "MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons)", "record.log_color = self.color(self.log_colors, record.levelname) str = logging.Formatter.format(self, record) header, footer = str.split(record.message) str", "datefmt='%Y-%m-%d %H:%M:%S') rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile", "RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG = \"\"\"<NAME>, SENCKENBERG Biodiversity and Climate", "(non-color) MultiLineFormatterColor = MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s] %(message)s\" FILE_FORMAT = \"%(asctime)s [%(levelname)-8s] %(message)s", "str = str.replace('\\n', '\\n' + ' ' * len(header)) return str # optional", "def format(self, record): str = logging.Formatter.format(self, record) header, footer = str.split(record.message) str =", "np import sys try: # python 2.7+ from logging import NullHandler except ImportError:", "= str.replace('\\n', '\\n' + ' ' * len(header)) return str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s]", "= MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s] %(message)s\" FILE_FORMAT = \"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons", "multi-line logging formatter \"\"\" def format(self, record): str = logging.Formatter.format(self, record) header, footer", "' ' * len(header)) return str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError: #", "formatters should use the default (non-color) MultiLineFormatterColor = 
MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s] %(message)s\"", "NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create", "= 'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\" A custom multi-line logging formatter \"\"\" def format(self,", "record): record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors, record.levelname) str = logging.Formatter.format(self, record) header, footer =", "= RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG = \"\"\"<NAME>, SENCKENBERG Biodiversity and", "import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors, record.levelname) str", "str = logging.Formatter.format(self, record) header, footer = str.split(record.message) str = str.replace('\\n', '\\n' +", "'\\n' + ' ' * len(header)) return str # optional colored console logger", "try: # python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def", "'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\" A custom multi-line logging formatter \"\"\" def format(self, record):", "= MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG)", "fileName = 'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\" A custom multi-line logging formatter \"\"\" def", "fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG = \"\"\"<NAME>, SENCKENBERG Biodiversity and Climate Research", 
"rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG = \"\"\"<NAME>, SENCKENBERG", "A custom multi-line logging formatter \"\"\" def format(self, record): str = logging.Formatter.format(self, record)", "use the default (non-color) MultiLineFormatterColor = MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s] %(message)s\" FILE_FORMAT =", "def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create sublogger for different scripts logPath", "# TODO: create sublogger for different scripts logPath = '.' fileName = 'lpjguesstools'", "= \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError: # both formatters should use the default (non-color)", "for different scripts logPath = '.' fileName = 'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\" A", "= MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG)", "logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG", "format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors, record.levelname) str = logging.Formatter.format(self, record) header, footer", "str.split(record.message) str = str.replace('\\n', '\\n' + ' ' * len(header)) return str CONS_FORMAT", "as np import sys try: # python 2.7+ from logging import NullHandler except", "hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG = 
\"\"\"<NAME>, SENCKENBERG Biodiversity and Climate Research Centre (BiK-F)", "lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger = logging.getLogger(__name__)", "colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors, record.levelname) str =", "create sublogger for different scripts logPath = '.' fileName = 'lpjguesstools' class MultiLineFormatter(logging.Formatter):", "from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler())", "= str.split(record.message) str = str.replace('\\n', '\\n' + ' ' * len(header)) return str", "= \"[%(levelname)-8s] %(message)s\" FILE_FORMAT = \"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d", "[%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S')", "%(message)s\" FILE_FORMAT = \"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile", "class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create sublogger for different", "hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG =", "str.replace('\\n', '\\n' + ' ' * len(header)) return str # optional colored console", 
"hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG = \"\"\"<NAME>,", "logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) #", "footer = str.split(record.message) str = str.replace('\\n', '\\n' + ' ' * len(header)) return", "\"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d", "logging formatter \"\"\" def format(self, record): str = logging.Formatter.format(self, record) header, footer =", "both formatters should use the default (non-color) MultiLineFormatterColor = MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s]", "logging.Formatter.format(self, record) header, footer = str.split(record.message) str = str.replace('\\n', '\\n' + ' '", "colored console logger (nice!) try: import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record): record.__dict__.update(colorlog.escape_codes)", "logger (nice!) 
try: import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color =", "numpy as np import sys try: # python 2.7+ from logging import NullHandler", "import sys try: # python 2.7+ from logging import NullHandler except ImportError: class", "record) header, footer = str.split(record.message) str = str.replace('\\n', '\\n' + ' ' *", "import RotatingFileHandler import numpy as np import sys try: # python 2.7+ from", "record.levelname) str = logging.Formatter.format(self, record) header, footer = str.split(record.message) str = str.replace('\\n', '\\n'", "def format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors, record.levelname) str = logging.Formatter.format(self, record) header,", "hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG = \"\"\"<NAME>, SENCKENBERG Biodiversity and Climate Research Centre (BiK-F) email:", "logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000)", "\"\"\" A custom multi-line logging formatter \"\"\" def format(self, record): str = logging.Formatter.format(self,", "sys try: # python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler):", "maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile) EPILOG = \"\"\"<NAME>, SENCKENBERG Biodiversity and Climate Research Centre", "TODO: create sublogger for different scripts logPath = '.' 
fileName = 'lpjguesstools' class", "record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors, record.levelname) str = logging.Formatter.format(self, record) header, footer = str.split(record.message)", "MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors, record.levelname) str = logging.Formatter.format(self, record)", "default (non-color) MultiLineFormatterColor = MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s] %(message)s\" FILE_FORMAT = \"%(asctime)s [%(levelname)-8s]", "hCons = logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG)", "\"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError: # both formatters should use the default (non-color) MultiLineFormatterColor", "emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create sublogger for different scripts logPath =", "\"[%(levelname)-8s] %(message)s\" FILE_FORMAT = \"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S')", "import numpy as np import sys try: # python 2.7+ from logging import", "* len(header)) return str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError: # both formatters", "MultiLineFormatter(logging.Formatter): \"\"\" A custom multi-line logging formatter \"\"\" def format(self, record): str =", "datefmt='%Y-%m-%d %H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons =", "= logging.Formatter.format(self, 
record) header, footer = str.split(record.message) str = str.replace('\\n', '\\n' + '", "pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create sublogger for different scripts logPath = '.' fileName", "ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create sublogger for", "NullHandler(logging.Handler): def emit(self, record): pass logging.getLogger(__name__).addHandler(NullHandler()) # TODO: create sublogger for different scripts", "%H:%M:%S') rootLogger = logging.getLogger(__name__) rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile =", "custom multi-line logging formatter \"\"\" def format(self, record): str = logging.Formatter.format(self, record) header,", "format(self, record): str = logging.Formatter.format(self, record) header, footer = str.split(record.message) str = str.replace('\\n',", "= \"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT,", "%(log_color)s%(message)s%(reset)s\" except ImportError: # both formatters should use the default (non-color) MultiLineFormatterColor =", "should use the default (non-color) MultiLineFormatterColor = MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s] %(message)s\" FILE_FORMAT", "' * len(header)) return str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError: # both", "rootLogger.setLevel(logging.DEBUG) hCons = logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile)", "str = str.replace('\\n', '\\n' + ' ' * len(header)) return str CONS_FORMAT =", 
"%(message)s (%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger", "# optional colored console logger (nice!) try: import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self,", "rootLogger.addHandler(hFile) EPILOG = \"\"\"<NAME>, SENCKENBERG Biodiversity and Climate Research Centre (BiK-F) email: <EMAIL>", "logPath = '.' fileName = 'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\" A custom multi-line logging", "(%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') rootLogger =", "2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass", "FILE_FORMAT = \"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\" lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S') lfFile =", "console logger (nice!) try: import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color", "python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record):", "= '.' 
fileName = 'lpjguesstools' class MultiLineFormatter(logging.Formatter): \"\"\" A custom multi-line logging formatter", "'\\n' + ' ' * len(header)) return str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except", "MultiLineFormatterColor = MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s] %(message)s\" FILE_FORMAT = \"%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)\"", "len(header)) return str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\" except ImportError: # both formatters should", "try: import colorlog class MultiLineFormatterColor(colorlog.ColoredFormatter): def format(self, record): record.__dict__.update(colorlog.escape_codes) record.log_color = self.color(self.log_colors, record.levelname)", "the default (non-color) MultiLineFormatterColor = MultiLineFormatter CONS_FORMAT = \"[%(levelname)-8s] %(message)s\" FILE_FORMAT = \"%(asctime)s", "+ ' ' * len(header)) return str # optional colored console logger (nice!)", "# both formatters should use the default (non-color) MultiLineFormatterColor = MultiLineFormatter CONS_FORMAT =", "= logging.StreamHandler() hCons.setFormatter(lfCons) hCons.setLevel(logging.DEBUG) rootLogger.addHandler(hCons) hFile = RotatingFileHandler(\"{0}/{1}.log\".format(logPath, fileName), maxBytes=10000) hFile.setFormatter(lfFile) hFile.setLevel(logging.DEBUG) rootLogger.addHandler(hFile)", "from logging.handlers import RotatingFileHandler import numpy as np import sys try: # python", "self.color(self.log_colors, record.levelname) str = logging.Formatter.format(self, record) header, footer = str.split(record.message) str = str.replace('\\n',", "' * len(header)) return str # optional colored console logger (nice!) try: import", "str.replace('\\n', '\\n' + ' ' * len(header)) return str CONS_FORMAT = \"[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s\"" ]
[ "class Config(AppBundleConfig): SECRET_KEY = 'not-secret-key' SECURITY_SEND_REGISTER_EMAIL = True SECURITY_SEND_PASSWORD_CHANGED_EMAIL = True SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL =", "import AppBundleConfig class Config(AppBundleConfig): SECRET_KEY = 'not-secret-key' SECURITY_SEND_REGISTER_EMAIL = True SECURITY_SEND_PASSWORD_CHANGED_EMAIL = True", "from flask_unchained import AppBundleConfig class Config(AppBundleConfig): SECRET_KEY = 'not-secret-key' SECURITY_SEND_REGISTER_EMAIL = True SECURITY_SEND_PASSWORD_CHANGED_EMAIL", "AppBundleConfig class Config(AppBundleConfig): SECRET_KEY = 'not-secret-key' SECURITY_SEND_REGISTER_EMAIL = True SECURITY_SEND_PASSWORD_CHANGED_EMAIL = True SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL", "Config(AppBundleConfig): SECRET_KEY = 'not-secret-key' SECURITY_SEND_REGISTER_EMAIL = True SECURITY_SEND_PASSWORD_CHANGED_EMAIL = True SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL = True", "flask_unchained import AppBundleConfig class Config(AppBundleConfig): SECRET_KEY = 'not-secret-key' SECURITY_SEND_REGISTER_EMAIL = True SECURITY_SEND_PASSWORD_CHANGED_EMAIL =" ]
[ "train fastText on it. Args: args (TYPE) \"\"\" tok = BertTokenizer.from_pretrained(args.vocab) with open(args.corpus,", "sentence per line in order to be able to train fastText on it.", "and write one sentence per line in order to be able to train", "import tqdm def main(args): \"\"\"Tokenize a corpus and write one sentence per line", "with open(args.corpus, \"r\") as fin, open(args.outfile, \"w\") as feng: for line in tqdm(fin):", "default=None, type=str, required=True, help=\"\") parser.add_argument(\"--outfile\", default=None, type=str, required=True, help=\"\") args = parser.parse_args() main(args)", "args (TYPE) \"\"\" tok = BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\") as fin, open(args.outfile, \"w\")", "x for x in tokenized]) + \"\\n\") if __name__ == '__main__': parser =", "open(args.corpus, \"r\") as fin, open(args.outfile, \"w\") as feng: for line in tqdm(fin): tokenized", "x in tokenized]) + \"\\n\") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\",", "one sentence per line in order to be able to train fastText on", "feng.write(\" \".join([args.prefix + x for x in tokenized]) + \"\\n\") if __name__ ==", "in tokenized]) + \"\\n\") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None,", "if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--vocab\",", "required=True, help=\"\") parser.add_argument(\"--prefix\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--outfile\", default=None, type=str, required=True, help=\"\") args", "open(args.outfile, \"w\") as feng: for line in tqdm(fin): tokenized = tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix", "+ x for x in tokenized]) + \"\\n\") if __name__ == '__main__': parser", "= tok.tokenize(line.strip()) feng.write(\" 
\".join([args.prefix + x for x in tokenized]) + \"\\n\") if", "parser.add_argument(\"--vocab\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--prefix\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--outfile\", default=None, type=str,", "\"\"\" tok = BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\") as fin, open(args.outfile, \"w\") as feng:", "+ \"\\n\") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str, required=True,", "per line in order to be able to train fastText on it. Args:", "== '__main__': parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--vocab\", default=None, type=str,", "fin, open(args.outfile, \"w\") as feng: for line in tqdm(fin): tokenized = tok.tokenize(line.strip()) feng.write(\"", "parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--vocab\", default=None, type=str, required=True, help=\"\")", "parser.add_argument(\"--prefix\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--outfile\", default=None, type=str, required=True, help=\"\") args = parser.parse_args()", "on it. 
Args: args (TYPE) \"\"\" tok = BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\") as", "\"\\n\") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str, required=True, help=\"\")", "default=None, type=str, required=True, help=\"\") parser.add_argument(\"--vocab\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--prefix\", default=None, type=str, required=True,", "as fin, open(args.outfile, \"w\") as feng: for line in tqdm(fin): tokenized = tok.tokenize(line.strip())", "= argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--vocab\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--prefix\",", "def main(args): \"\"\"Tokenize a corpus and write one sentence per line in order", "tqdm def main(args): \"\"\"Tokenize a corpus and write one sentence per line in", "default=None, type=str, required=True, help=\"\") parser.add_argument(\"--prefix\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--outfile\", default=None, type=str, required=True,", "__name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--vocab\", default=None,", "to be able to train fastText on it. 
Args: args (TYPE) \"\"\" tok", "write one sentence per line in order to be able to train fastText", "transformers import BertTokenizer from tqdm import tqdm def main(args): \"\"\"Tokenize a corpus and", "feng: for line in tqdm(fin): tokenized = tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix + x for", "line in tqdm(fin): tokenized = tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix + x for x in", "help=\"\") parser.add_argument(\"--prefix\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--outfile\", default=None, type=str, required=True, help=\"\") args =", "a corpus and write one sentence per line in order to be able", "it. Args: args (TYPE) \"\"\" tok = BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\") as fin,", "(TYPE) \"\"\" tok = BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\") as fin, open(args.outfile, \"w\") as", "fastText on it. Args: args (TYPE) \"\"\" tok = BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\")", "able to train fastText on it. Args: args (TYPE) \"\"\" tok = BertTokenizer.from_pretrained(args.vocab)", "import argparse from transformers import BertTokenizer from tqdm import tqdm def main(args): \"\"\"Tokenize", "be able to train fastText on it. Args: args (TYPE) \"\"\" tok =", "\"w\") as feng: for line in tqdm(fin): tokenized = tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix +", "for line in tqdm(fin): tokenized = tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix + x for x", "tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix + x for x in tokenized]) + \"\\n\") if __name__", "line in order to be able to train fastText on it. 
Args: args", "= BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\") as fin, open(args.outfile, \"w\") as feng: for line", "help=\"\") parser.add_argument(\"--vocab\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--prefix\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--outfile\", default=None,", "tqdm import tqdm def main(args): \"\"\"Tokenize a corpus and write one sentence per", "to train fastText on it. Args: args (TYPE) \"\"\" tok = BertTokenizer.from_pretrained(args.vocab) with", "in tqdm(fin): tokenized = tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix + x for x in tokenized])", "tok = BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\") as fin, open(args.outfile, \"w\") as feng: for", "Args: args (TYPE) \"\"\" tok = BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\") as fin, open(args.outfile,", "argparse from transformers import BertTokenizer from tqdm import tqdm def main(args): \"\"\"Tokenize a", "type=str, required=True, help=\"\") parser.add_argument(\"--vocab\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--prefix\", default=None, type=str, required=True, help=\"\")", "as feng: for line in tqdm(fin): tokenized = tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix + x", "type=str, required=True, help=\"\") parser.add_argument(\"--prefix\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--outfile\", default=None, type=str, required=True, help=\"\")", "argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--vocab\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--prefix\", default=None,", "\"\"\"Tokenize a corpus and write one sentence per line in order to be", "from tqdm import tqdm def main(args): \"\"\"Tokenize a corpus and write one sentence", "import 
BertTokenizer from tqdm import tqdm def main(args): \"\"\"Tokenize a corpus and write", "BertTokenizer from tqdm import tqdm def main(args): \"\"\"Tokenize a corpus and write one", "main(args): \"\"\"Tokenize a corpus and write one sentence per line in order to", "'__main__': parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--vocab\", default=None, type=str, required=True,", "corpus and write one sentence per line in order to be able to", "tokenized = tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix + x for x in tokenized]) + \"\\n\")", "\"r\") as fin, open(args.outfile, \"w\") as feng: for line in tqdm(fin): tokenized =", "tqdm(fin): tokenized = tok.tokenize(line.strip()) feng.write(\" \".join([args.prefix + x for x in tokenized]) +", "in order to be able to train fastText on it. Args: args (TYPE)", "order to be able to train fastText on it. Args: args (TYPE) \"\"\"", "BertTokenizer.from_pretrained(args.vocab) with open(args.corpus, \"r\") as fin, open(args.outfile, \"w\") as feng: for line in", "\".join([args.prefix + x for x in tokenized]) + \"\\n\") if __name__ == '__main__':", "required=True, help=\"\") parser.add_argument(\"--vocab\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--prefix\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--outfile\",", "from transformers import BertTokenizer from tqdm import tqdm def main(args): \"\"\"Tokenize a corpus", "tokenized]) + \"\\n\") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument(\"--corpus\", default=None, type=str,", "for x in tokenized]) + \"\\n\") if __name__ == '__main__': parser = argparse.ArgumentParser()", "parser.add_argument(\"--corpus\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--vocab\", default=None, type=str, required=True, help=\"\") parser.add_argument(\"--prefix\", default=None, 
type=str," ]
[ "import gi gi.require_version(\"Gtk\", \"3.0\") from gi.repository import Gtk builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers", "Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers = { \"onDestroy\": Gtk.main_quit, \"onExitActivate\": Gtk.main_quit } builder.connect_signals(handlers) window =", "from gi.repository import Gtk builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers = { \"onDestroy\": Gtk.main_quit,", "builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers = { \"onDestroy\": Gtk.main_quit, \"onExitActivate\": Gtk.main_quit } builder.connect_signals(handlers)", "Gtk builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers = { \"onDestroy\": Gtk.main_quit, \"onExitActivate\": Gtk.main_quit }", "<gh_stars>1-10 import gi gi.require_version(\"Gtk\", \"3.0\") from gi.repository import Gtk builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\")", "\"3.0\") from gi.repository import Gtk builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers = { \"onDestroy\":", "= { \"onDestroy\": Gtk.main_quit, \"onExitActivate\": Gtk.main_quit } builder.connect_signals(handlers) window = builder.get_object(\"MainWindow\") window.show_all() Gtk.main()", "gi.repository import Gtk builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers = { \"onDestroy\": Gtk.main_quit, \"onExitActivate\":", "import Gtk builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers = { \"onDestroy\": Gtk.main_quit, \"onExitActivate\": Gtk.main_quit", "builder.add_from_file(\"UI.glade\") handlers = { \"onDestroy\": Gtk.main_quit, \"onExitActivate\": Gtk.main_quit } builder.connect_signals(handlers) window = builder.get_object(\"MainWindow\")", "handlers = { \"onDestroy\": Gtk.main_quit, \"onExitActivate\": Gtk.main_quit } builder.connect_signals(handlers) window = builder.get_object(\"MainWindow\") window.show_all()", "gi gi.require_version(\"Gtk\", \"3.0\") from gi.repository 
import Gtk builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers =", "= Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers = { \"onDestroy\": Gtk.main_quit, \"onExitActivate\": Gtk.main_quit } builder.connect_signals(handlers) window", "gi.require_version(\"Gtk\", \"3.0\") from gi.repository import Gtk builder = Gtk.Builder(); builder.add_from_file(\"UI.glade\") handlers = {" ]
[ "og_domain = 'fr' og_universe = '10' og_id_mother_planet = '1234567' og_planets = {'340001', '340002',", "og_id_mother_planet = '1234567' og_planets = {'340001', '340002', '340003'} #every planets except the target", "Your empire is under attack. Log in as soon as possible!' message_file =", "#every planets except the target one og_main_planet_coordonates = ['1', '300', '6'] og_metal_threshold =", "1000000 og_large_cargo_to_send = '100' mobile_number = '0607080910' default_message = 'Hello Alex. Your empire", "og_large_cargo_to_send = '100' mobile_number = '0607080910' default_message = 'Hello Alex. Your empire is", "og_metal_threshold = 1000000 og_large_cargo_to_send = '100' mobile_number = '0607080910' default_message = 'Hello Alex.", "= '<PASSWORD>' og_domain = 'fr' og_universe = '10' og_id_mother_planet = '1234567' og_planets =", "{'340001', '340002', '340003'} #every planets except the target one og_main_planet_coordonates = ['1', '300',", "'<PASSWORD>' og_domain = 'fr' og_universe = '10' og_id_mother_planet = '1234567' og_planets = {'340001',", "og_universe = '10' og_id_mother_planet = '1234567' og_planets = {'340001', '340002', '340003'} #every planets", "'1234567' og_planets = {'340001', '340002', '340003'} #every planets except the target one og_main_planet_coordonates", "= '100' mobile_number = '0607080910' default_message = 'Hello Alex. Your empire is under", "'100' mobile_number = '0607080910' default_message = 'Hello Alex. Your empire is under attack.", "= '10' og_id_mother_planet = '1234567' og_planets = {'340001', '340002', '340003'} #every planets except", "under attack. Log in as soon as possible!' message_file = '/tmp/message' call_file =", "= 'Hello Alex. Your empire is under attack. Log in as soon as", "'300', '6'] og_metal_threshold = 1000000 og_large_cargo_to_send = '100' mobile_number = '0607080910' default_message =", "= '1234567' og_planets = {'340001', '340002', '340003'} #every planets except the target one", "'Hello Alex. 
Your empire is under attack. Log in as soon as possible!'", "Alex. Your empire is under attack. Log in as soon as possible!' message_file", "except the target one og_main_planet_coordonates = ['1', '300', '6'] og_metal_threshold = 1000000 og_large_cargo_to_send", "default_message = 'Hello Alex. Your empire is under attack. Log in as soon", "Log in as soon as possible!' message_file = '/tmp/message' call_file = '/tmp/ogame_attack_warning.call' asterisk_outgoing", "as soon as possible!' message_file = '/tmp/message' call_file = '/tmp/ogame_attack_warning.call' asterisk_outgoing = '/var/spool/asterisk/outgoing/'", "'6'] og_metal_threshold = 1000000 og_large_cargo_to_send = '100' mobile_number = '0607080910' default_message = 'Hello", "planets except the target one og_main_planet_coordonates = ['1', '300', '6'] og_metal_threshold = 1000000", "= {'340001', '340002', '340003'} #every planets except the target one og_main_planet_coordonates = ['1',", "og_password = '<PASSWORD>' og_domain = 'fr' og_universe = '10' og_id_mother_planet = '1234567' og_planets", "= 'your_login' og_password = '<PASSWORD>' og_domain = 'fr' og_universe = '10' og_id_mother_planet =", "= '0607080910' default_message = 'Hello Alex. Your empire is under attack. Log in", "'10' og_id_mother_planet = '1234567' og_planets = {'340001', '340002', '340003'} #every planets except the", "one og_main_planet_coordonates = ['1', '300', '6'] og_metal_threshold = 1000000 og_large_cargo_to_send = '100' mobile_number", "og_main_planet_coordonates = ['1', '300', '6'] og_metal_threshold = 1000000 og_large_cargo_to_send = '100' mobile_number =", "<filename>source/config.sample.py<gh_stars>1-10 og_login = 'your_login' og_password = '<PASSWORD>' og_domain = 'fr' og_universe = '10'", "is under attack. Log in as soon as possible!' 
message_file = '/tmp/message' call_file", "og_login = 'your_login' og_password = '<PASSWORD>' og_domain = 'fr' og_universe = '10' og_id_mother_planet", "mobile_number = '0607080910' default_message = 'Hello Alex. Your empire is under attack. Log", "empire is under attack. Log in as soon as possible!' message_file = '/tmp/message'", "attack. Log in as soon as possible!' message_file = '/tmp/message' call_file = '/tmp/ogame_attack_warning.call'", "'340002', '340003'} #every planets except the target one og_main_planet_coordonates = ['1', '300', '6']", "the target one og_main_planet_coordonates = ['1', '300', '6'] og_metal_threshold = 1000000 og_large_cargo_to_send =", "'340003'} #every planets except the target one og_main_planet_coordonates = ['1', '300', '6'] og_metal_threshold", "= 1000000 og_large_cargo_to_send = '100' mobile_number = '0607080910' default_message = 'Hello Alex. Your", "og_planets = {'340001', '340002', '340003'} #every planets except the target one og_main_planet_coordonates =", "'0607080910' default_message = 'Hello Alex. Your empire is under attack. Log in as", "'fr' og_universe = '10' og_id_mother_planet = '1234567' og_planets = {'340001', '340002', '340003'} #every", "['1', '300', '6'] og_metal_threshold = 1000000 og_large_cargo_to_send = '100' mobile_number = '0607080910' default_message", "'your_login' og_password = '<PASSWORD>' og_domain = 'fr' og_universe = '10' og_id_mother_planet = '1234567'", "in as soon as possible!' message_file = '/tmp/message' call_file = '/tmp/ogame_attack_warning.call' asterisk_outgoing =", "target one og_main_planet_coordonates = ['1', '300', '6'] og_metal_threshold = 1000000 og_large_cargo_to_send = '100'", "= ['1', '300', '6'] og_metal_threshold = 1000000 og_large_cargo_to_send = '100' mobile_number = '0607080910'", "= 'fr' og_universe = '10' og_id_mother_planet = '1234567' og_planets = {'340001', '340002', '340003'}" ]
[ "= input(\"Initialize will override templates, sure you want to proceed? [Y|n] \") if", "\"init\" or argv[0] == \"i\": print(\"Initialize\") print(\"\") read = input(\"Initialize will override templates,", "print(\"Serve\") serve.run_server() elif argv[0] == \"init\" or argv[0] == \"i\": print(\"Initialize\") print(\"\") read", "\"clean\" or argv[0] == \"c\": print(\"Cleaning output folder\") clean.run_clean() elif argv[0] == \"build\"", "build from _erwin import serve from _erwin import clean from _erwin import initialize", "build.main() elif argv[0] == \"serve\" or argv[0] == \"s\": print(\"Serve\") serve.run_server() elif argv[0]", "import serve from _erwin import clean from _erwin import initialize def run(argv): if", "initialize def run(argv): if argv[0] == \"clean\" or argv[0] == \"c\": print(\"Cleaning output", "print(\"\") read = input(\"Initialize will override templates, sure you want to proceed? [Y|n]", "elif argv[0] == \"init\" or argv[0] == \"i\": print(\"Initialize\") print(\"\") read = input(\"Initialize", "to proceed? [Y|n] \") if read == \"Y\": initialize.run_init() else: print(\"Aborted\") else: print(\"usage:", "print(\"Cleaning output folder\") clean.run_clean() elif argv[0] == \"build\" or argv[0] == \"b\": print(\"Build\")", "\"serve\" or argv[0] == \"s\": print(\"Serve\") serve.run_server() elif argv[0] == \"init\" or argv[0]", "sure you want to proceed? 
[Y|n] \") if read == \"Y\": initialize.run_init() else:", "_erwin import serve from _erwin import clean from _erwin import initialize def run(argv):", "[Y|n] \") if read == \"Y\": initialize.run_init() else: print(\"Aborted\") else: print(\"usage: python erwin.py", "from _erwin import initialize def run(argv): if argv[0] == \"clean\" or argv[0] ==", "elif argv[0] == \"serve\" or argv[0] == \"s\": print(\"Serve\") serve.run_server() elif argv[0] ==", "clean from _erwin import initialize def run(argv): if argv[0] == \"clean\" or argv[0]", "import initialize def run(argv): if argv[0] == \"clean\" or argv[0] == \"c\": print(\"Cleaning", "serve.run_server() elif argv[0] == \"init\" or argv[0] == \"i\": print(\"Initialize\") print(\"\") read =", "run(argv): if argv[0] == \"clean\" or argv[0] == \"c\": print(\"Cleaning output folder\") clean.run_clean()", "will override templates, sure you want to proceed? [Y|n] \") if read ==", "if read == \"Y\": initialize.run_init() else: print(\"Aborted\") else: print(\"usage: python erwin.py build|serve|clean|init b|s|c|i\")", "argv[0] == \"init\" or argv[0] == \"i\": print(\"Initialize\") print(\"\") read = input(\"Initialize will", "clean.run_clean() elif argv[0] == \"build\" or argv[0] == \"b\": print(\"Build\") build.main() elif argv[0]", "== \"c\": print(\"Cleaning output folder\") clean.run_clean() elif argv[0] == \"build\" or argv[0] ==", "proceed? [Y|n] \") if read == \"Y\": initialize.run_init() else: print(\"Aborted\") else: print(\"usage: python", "argv[0] == \"c\": print(\"Cleaning output folder\") clean.run_clean() elif argv[0] == \"build\" or argv[0]", "\") if read == \"Y\": initialize.run_init() else: print(\"Aborted\") else: print(\"usage: python erwin.py build|serve|clean|init", "read = input(\"Initialize will override templates, sure you want to proceed? 
[Y|n] \")", "or argv[0] == \"c\": print(\"Cleaning output folder\") clean.run_clean() elif argv[0] == \"build\" or", "from _erwin import clean from _erwin import initialize def run(argv): if argv[0] ==", "_erwin import initialize def run(argv): if argv[0] == \"clean\" or argv[0] == \"c\":", "argv[0] == \"s\": print(\"Serve\") serve.run_server() elif argv[0] == \"init\" or argv[0] == \"i\":", "== \"s\": print(\"Serve\") serve.run_server() elif argv[0] == \"init\" or argv[0] == \"i\": print(\"Initialize\")", "\"s\": print(\"Serve\") serve.run_server() elif argv[0] == \"init\" or argv[0] == \"i\": print(\"Initialize\") print(\"\")", "== \"i\": print(\"Initialize\") print(\"\") read = input(\"Initialize will override templates, sure you want", "print(\"Initialize\") print(\"\") read = input(\"Initialize will override templates, sure you want to proceed?", "or argv[0] == \"i\": print(\"Initialize\") print(\"\") read = input(\"Initialize will override templates, sure", "argv[0] == \"b\": print(\"Build\") build.main() elif argv[0] == \"serve\" or argv[0] == \"s\":", "from _erwin import build from _erwin import serve from _erwin import clean from", "argv[0] == \"build\" or argv[0] == \"b\": print(\"Build\") build.main() elif argv[0] == \"serve\"", "or argv[0] == \"s\": print(\"Serve\") serve.run_server() elif argv[0] == \"init\" or argv[0] ==", "argv[0] == \"clean\" or argv[0] == \"c\": print(\"Cleaning output folder\") clean.run_clean() elif argv[0]", "\"b\": print(\"Build\") build.main() elif argv[0] == \"serve\" or argv[0] == \"s\": print(\"Serve\") serve.run_server()", "== \"serve\" or argv[0] == \"s\": print(\"Serve\") serve.run_server() elif argv[0] == \"init\" or", "you want to proceed? 
[Y|n] \") if read == \"Y\": initialize.run_init() else: print(\"Aborted\")", "folder\") clean.run_clean() elif argv[0] == \"build\" or argv[0] == \"b\": print(\"Build\") build.main() elif", "serve from _erwin import clean from _erwin import initialize def run(argv): if argv[0]", "elif argv[0] == \"build\" or argv[0] == \"b\": print(\"Build\") build.main() elif argv[0] ==", "\"build\" or argv[0] == \"b\": print(\"Build\") build.main() elif argv[0] == \"serve\" or argv[0]", "\"c\": print(\"Cleaning output folder\") clean.run_clean() elif argv[0] == \"build\" or argv[0] == \"b\":", "print(\"Build\") build.main() elif argv[0] == \"serve\" or argv[0] == \"s\": print(\"Serve\") serve.run_server() elif", "import clean from _erwin import initialize def run(argv): if argv[0] == \"clean\" or", "== \"build\" or argv[0] == \"b\": print(\"Build\") build.main() elif argv[0] == \"serve\" or", "override templates, sure you want to proceed? [Y|n] \") if read == \"Y\":", "output folder\") clean.run_clean() elif argv[0] == \"build\" or argv[0] == \"b\": print(\"Build\") build.main()", "\"i\": print(\"Initialize\") print(\"\") read = input(\"Initialize will override templates, sure you want to", "from _erwin import serve from _erwin import clean from _erwin import initialize def", "_erwin import clean from _erwin import initialize def run(argv): if argv[0] == \"clean\"", "def run(argv): if argv[0] == \"clean\" or argv[0] == \"c\": print(\"Cleaning output folder\")", "== \"b\": print(\"Build\") build.main() elif argv[0] == \"serve\" or argv[0] == \"s\": print(\"Serve\")", "argv[0] == \"i\": print(\"Initialize\") print(\"\") read = input(\"Initialize will override templates, sure you", "if argv[0] == \"clean\" or argv[0] == \"c\": print(\"Cleaning output folder\") clean.run_clean() elif", "== \"clean\" or argv[0] == \"c\": print(\"Cleaning output folder\") clean.run_clean() elif argv[0] ==", "import build from _erwin import serve from _erwin import clean from _erwin import", 
"input(\"Initialize will override templates, sure you want to proceed? [Y|n] \") if read", "== \"init\" or argv[0] == \"i\": print(\"Initialize\") print(\"\") read = input(\"Initialize will override", "_erwin import build from _erwin import serve from _erwin import clean from _erwin", "templates, sure you want to proceed? [Y|n] \") if read == \"Y\": initialize.run_init()", "want to proceed? [Y|n] \") if read == \"Y\": initialize.run_init() else: print(\"Aborted\") else:", "argv[0] == \"serve\" or argv[0] == \"s\": print(\"Serve\") serve.run_server() elif argv[0] == \"init\"", "or argv[0] == \"b\": print(\"Build\") build.main() elif argv[0] == \"serve\" or argv[0] ==" ]
[ "..base import * # noqa INSTALLED_APPS += [ 'django.contrib.admin', 'apps.admin_site', 'apps.backoffice', 'apps.frontoffice', ]", "<reponame>thnee/django-template from ..base import * # noqa INSTALLED_APPS += [ 'django.contrib.admin', 'apps.admin_site', 'apps.backoffice',", "* # noqa INSTALLED_APPS += [ 'django.contrib.admin', 'apps.admin_site', 'apps.backoffice', 'apps.frontoffice', ] ROOT_URLCONF =", "# noqa INSTALLED_APPS += [ 'django.contrib.admin', 'apps.admin_site', 'apps.backoffice', 'apps.frontoffice', ] ROOT_URLCONF = 'apps.system.urls'", "import * # noqa INSTALLED_APPS += [ 'django.contrib.admin', 'apps.admin_site', 'apps.backoffice', 'apps.frontoffice', ] ROOT_URLCONF", "from ..base import * # noqa INSTALLED_APPS += [ 'django.contrib.admin', 'apps.admin_site', 'apps.backoffice', 'apps.frontoffice'," ]
[ "\"AAGG\", \"ACTA\", \"AAGN\", \"TACT\", \"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\", ], \"r1\": [\"1\",", "import unittest import numpy as np import pandas as pd import cassiopeia class", "= os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist = [\"ACTT\", \"TAAG\"] self.multi_case = pd.DataFrame.from_dict( { \"cellBC\": [", "\"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"] = self.multi_case.apply( lambda x: \"_\".join([x.r1, x.r2, x.r3]),", "import numpy as np import pandas as pd import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def", "self.whitelist_fp = os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist = [\"ACTT\", \"TAAG\"] self.multi_case = pd.DataFrame.from_dict( { \"cellBC\":", "\"TACT\", \"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\", ], \"r1\": [\"1\", \"1\", \"1\", \"1\",", "\"2\", \"2\", \"2\", \"2\"], \"r3\": [\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\",", "= cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"],", "\"20\", \"20\", \"20\", \"20\", \"20\", \"20\", ], \"CIGAR\": [ \"NA\", \"NA\", \"NA\", \"NA\",", "cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True)", "expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) if __name__ ==", "\"3\", \"3\", \"3\"], \"AlignmentScore\": [ \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\",", "\"AACCT\", 
\"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\", ], \"readCount\": [20, 30, 30,", "pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist, intbc_dist_thresh=1 ) expected_df =", "self.multi_case, self.whitelist, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df,", "test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] =", "[ \"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"D\", \"D\", ],", "\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\"], \"AlignmentScore\": [ \"20\",", "\"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", ], }", "self.whitelist_fp, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df)", "\"20\", \"20\", ], \"CIGAR\": [ \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\",", "\"AAGN\", \"TACT\", \"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\", ], \"r1\": [\"1\", \"1\", \"1\",", "\"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", ], \"CIGAR\": [ \"NA\", \"NA\",", "30, 30, 40, 50, 10, 10, 15, 10, 10, 10], \"Seq\": [ \"AACCTTGG\",", "self.multi_case[\"readName\"] = self.multi_case.apply( lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"] = self.multi_case.apply(", ") self.multi_case[\"allele\"] = self.multi_case.apply( lambda x: \"_\".join([x.r1, x.r2, x.r3]), axis=1 ) 
self.corrections =", "= { \"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\", \"ANNN\": \"ACTT\", }", "\"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\", ], \"readCount\": [20, 30, 30, 40, 50,", "\"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ], \"intBC\": [ \"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\", \"TACT\", \"TAAG\",", "df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections)", "str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"] = self.multi_case.apply( lambda x: \"_\".join([x.r1, x.r2, x.r3]), axis=1 )", "\"AlignmentScore\": [ \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\",", "\"cellBC\": [ \"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"D\", \"D\",", "inplace=True) pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist, intbc_dist_thresh=1 ) expected_df", "\"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"D\", \"D\", ], \"UMI\": [ \"AACCT\",", "], \"CIGAR\": [ \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\",", "\"20\", ], \"CIGAR\": [ \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\",", "15, 10, 10, 10], \"Seq\": [ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\",", "\"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ], \"intBC\":", "\"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\", \"TACT\", \"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\", ], \"r1\":", "\"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"D\", \"D\", ], \"UMI\":", "= 
cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"],", "[ \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", ],", "\"TNNG\": \"TAAG\", \"ANNN\": \"ACTT\", } def test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1", "30, 40, 50, 10, 10, 15, 10, 10, 10], \"Seq\": [ \"AACCTTGG\", \"AACCTTGG\",", "\"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\", ], \"r1\": [\"1\", \"1\", \"1\", \"1\", \"1\", \"1\",", "{ \"cellBC\": [ \"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"D\",", "def setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path = os.path.join(dir_path, \"test_files\") self.whitelist_fp = os.path.join(test_files_path, \"intbc_whitelist.txt\")", "\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"], \"r2\": [\"2\", \"2\", \"2\", \"2\",", "expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case,", "], \"UMI\": [ \"AACCT\", \"AACCG\", \"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\",", "\"Seq\": [ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\",", "class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path = os.path.join(dir_path, \"test_files\") self.whitelist_fp =", "10, 10], \"Seq\": [ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", 
\"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\",", "self.multi_case.apply( lambda x: \"_\".join([x.r1, x.r2, x.r3]), axis=1 ) self.corrections = { \"ACTT\": \"ACTT\",", "\"r2\": [\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\"], \"r3\":", "test_files_path = os.path.join(dir_path, \"test_files\") self.whitelist_fp = os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist = [\"ACTT\", \"TAAG\"] self.multi_case", "\"1\"], \"r2\": [\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\"],", "lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"] = self.multi_case.apply( lambda x: \"_\".join([x.r1,", "intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) def", "\"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"D\", \"D\", ], \"UMI\": [ \"AACCT\", \"AACCG\",", "\"3\", \"3\", \"3\", \"3\", \"3\"], \"AlignmentScore\": [ \"20\", \"20\", \"20\", \"20\", \"20\", \"20\",", "self.multi_case = pd.DataFrame.from_dict( { \"cellBC\": [ \"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\",", "= pd.DataFrame.from_dict( { \"cellBC\": [ \"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\",", "os.path.dirname(os.path.realpath(__file__)) test_files_path = os.path.join(dir_path, \"test_files\") self.whitelist_fp = os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist = [\"ACTT\", \"TAAG\"]", "= self.multi_case.apply( lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"] = self.multi_case.apply( lambda", "\"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ], \"intBC\": [ \"ACTT\", \"AAGG\",", "\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\"], \"r3\": [\"3\", \"3\",", "[\"2\", 
\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\"], \"r3\": [\"3\",", "\"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ], \"intBC\": [ \"ACTT\", \"AAGG\", \"ACTA\",", "\"20\", \"20\", \"20\", \"20\", ], \"CIGAR\": [ \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\",", "self.multi_case.apply( lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"] = self.multi_case.apply( lambda x:", "\"1\", \"1\", \"1\", \"1\", \"1\", \"1\"], \"r2\": [\"2\", \"2\", \"2\", \"2\", \"2\", \"2\",", "self.multi_case[\"allele\"] = self.multi_case.apply( lambda x: \"_\".join([x.r1, x.r2, x.r3]), axis=1 ) self.corrections = {", "\"2\"], \"r3\": [\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\"],", "\"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", ], } ) self.multi_case[\"readName\"] = self.multi_case.apply(", "10, 10, 10], \"Seq\": [ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\",", "\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"], \"r2\": [\"2\", \"2\", \"2\", \"2\", \"2\",", "\"2\", \"2\", \"2\", \"2\", \"2\", \"2\"], \"r3\": [\"3\", \"3\", \"3\", \"3\", \"3\", \"3\",", "\"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", ], \"CIGAR\": [ \"NA\",", "\"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", ], \"CIGAR\": [", "self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) if __name__ == \"__main__\": unittest.main()", "self.whitelist = [\"ACTT\", \"TAAG\"] self.multi_case = pd.DataFrame.from_dict( { \"cellBC\": [ \"A\", \"A\", \"A\",", "import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path = 
os.path.join(dir_path, \"test_files\")", "[\"ACTT\", \"TAAG\"] self.multi_case = pd.DataFrame.from_dict( { \"cellBC\": [ \"A\", \"A\", \"A\", \"B\", \"B\",", "\"AACCTAAA\", ], \"intBC\": [ \"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\", \"TACT\", \"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\",", "\"AACCTTGG\", \"AACCTAAA\", ], \"intBC\": [ \"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\", \"TACT\", \"TAAG\", \"TNNG\", \"ANNN\",", "\"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", ], } ) self.multi_case[\"readName\"] = self.multi_case.apply( lambda", "np import pandas as pd import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path =", "pd.DataFrame.from_dict( { \"cellBC\": [ \"A\", \"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\",", "10, 10, 15, 10, 10, 10], \"Seq\": [ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\",", "[ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ],", "= os.path.join(dir_path, \"test_files\") self.whitelist_fp = os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist = [\"ACTT\", \"TAAG\"] self.multi_case =", "\"AAGGG\", ], \"readCount\": [20, 30, 30, 40, 50, 10, 10, 15, 10, 10,", "\"1\", \"1\", \"1\", \"1\"], \"r2\": [\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\",", "\"3\", \"3\", \"3\", \"3\", \"3\", \"3\"], \"AlignmentScore\": [ \"20\", \"20\", \"20\", \"20\", \"20\",", "\"NA\", \"NA\", \"NA\", \"NA\", ], } ) self.multi_case[\"readName\"] = self.multi_case.apply( lambda x: \"_\".join([x.cellBC,", "= os.path.dirname(os.path.realpath(__file__)) test_files_path = os.path.join(dir_path, \"test_files\") self.whitelist_fp = os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist = [\"ACTT\",", "x.r2, x.r3]), axis=1 ) self.corrections = { \"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\",", "\"C\", \"D\", \"D\", ], \"UMI\": [ \"AACCT\", \"AACCG\", 
\"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\", \"AACCG\",", "unittest import numpy as np import pandas as pd import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase):", "cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path = os.path.join(dir_path, \"test_files\") self.whitelist_fp", "\"r3\": [\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\"], \"AlignmentScore\":", "\"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", ], \"CIGAR\": [ \"NA\", \"NA\", \"NA\",", "test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] =", "= self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) if __name__ == \"__main__\":", "cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True)", "\"C\", \"C\", \"D\", \"D\", ], \"UMI\": [ \"AACCT\", \"AACCG\", \"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\",", "\"GCTT\", \"NNNN\", \"AAAA\", ], \"r1\": [\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\",", "\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\"], \"AlignmentScore\": [ \"20\", \"20\",", "= [\"ACTT\", \"TAAG\"] self.multi_case = pd.DataFrame.from_dict( { \"cellBC\": [ \"A\", \"A\", \"A\", \"B\",", "\"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", ], \"CIGAR\":", "\"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\", \"ANNN\": \"ACTT\", } def test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist(", 
"\"test_files\") self.whitelist_fp = os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist = [\"ACTT\", \"TAAG\"] self.multi_case = pd.DataFrame.from_dict( {", "expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist, intbc_dist_thresh=1 )", "\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\"], \"r3\": [\"3\", \"3\", \"3\",", "\"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\", ], \"readCount\": [20, 30,", "[ \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", ],", "\"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ], \"intBC\": [ \"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\", \"TACT\",", "self.whitelist, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df)", "as np import pandas as pd import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path", "40, 50, 10, 10, 15, 10, 10, 10], \"Seq\": [ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\",", "\"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", ], } ) self.multi_case[\"readName\"]", "x.r3]), axis=1 ) self.corrections = { \"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\":", "\"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ], \"intBC\": [ \"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\", \"TACT\", \"TAAG\", \"TNNG\",", "self.corrections = { \"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\", \"ANNN\": \"ACTT\",", "\"CIGAR\": [ \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\",", "os.path.join(dir_path, \"test_files\") self.whitelist_fp = 
os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist = [\"ACTT\", \"TAAG\"] self.multi_case = pd.DataFrame.from_dict(", "\"NNNN\", \"AAAA\", ], \"r1\": [\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\",", "\"D\", ], \"UMI\": [ \"AACCT\", \"AACCG\", \"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\",", "], \"r1\": [\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"],", "\"readCount\": [20, 30, 30, 40, 50, 10, 10, 15, 10, 10, 10], \"Seq\":", ") expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self):", "\"ACTA\", \"AAGN\", \"TACT\", \"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\", ], \"r1\": [\"1\", \"1\",", "\"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\", ], \"readCount\": [20, 30, 30, 40, 50, 10,", "\"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\", \"ANNN\": \"ACTT\", } def test_correct(self): df =", "\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\"], \"AlignmentScore\": [ \"20\", \"20\", \"20\",", "\"20\", \"20\", \"20\", \"20\", \"20\", ], \"CIGAR\": [ \"NA\", \"NA\", \"NA\", \"NA\", \"NA\",", "\"B\", \"C\", \"C\", \"C\", \"C\", \"D\", \"D\", ], \"UMI\": [ \"AACCT\", \"AACCG\", \"AACCC\",", "x: \"_\".join([x.r1, x.r2, x.r3]), axis=1 ) self.corrections = { \"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\",", "= self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self): df =", "\"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\", ], \"r1\": [\"1\", \"1\", \"1\", \"1\", \"1\",", "\"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\", ], \"r1\": [\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\",", "\"TAAG\"] 
self.multi_case = pd.DataFrame.from_dict( { \"cellBC\": [ \"A\", \"A\", \"A\", \"B\", \"B\", \"C\",", "], \"readCount\": [20, 30, 30, 40, 50, 10, 10, 15, 10, 10, 10],", "\"D\", \"D\", ], \"UMI\": [ \"AACCT\", \"AACCG\", \"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\",", "\"3\", \"3\"], \"AlignmentScore\": [ \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\",", "import pandas as pd import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__))", "expected_df) def test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy()", "\"C\", \"C\", \"C\", \"C\", \"D\", \"D\", ], \"UMI\": [ \"AACCT\", \"AACCG\", \"AACCC\", \"AACCT\",", "df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections)", "os import unittest import numpy as np import pandas as pd import cassiopeia", "\"3\"], \"AlignmentScore\": [ \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\",", "10, 15, 10, 10, 10], \"Seq\": [ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\",", "\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"], \"r2\": [\"2\", \"2\",", "\"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ], \"intBC\": [ \"ACTT\",", "], \"intBC\": [ \"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\", \"TACT\", \"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\",", "\"AACCT\", \"AACCG\", \"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\", ], \"readCount\":", "\"2\", \"2\", \"2\", \"2\", \"2\"], \"r3\": [\"3\", \"3\", \"3\", \"3\", 
\"3\", \"3\", \"3\",", "lambda x: \"_\".join([x.r1, x.r2, x.r3]), axis=1 ) self.corrections = { \"ACTT\": \"ACTT\", \"TAAG\":", "\"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ], \"intBC\": [", "\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\"], \"r3\": [\"3\", \"3\", \"3\", \"3\",", "\"2\", \"2\", \"2\"], \"r3\": [\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\",", "\"3\", \"3\", \"3\", \"3\"], \"AlignmentScore\": [ \"20\", \"20\", \"20\", \"20\", \"20\", \"20\", \"20\",", "\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\"], \"AlignmentScore\": [ \"20\", \"20\", \"20\", \"20\",", "\"intbc_whitelist.txt\") self.whitelist = [\"ACTT\", \"TAAG\"] self.multi_case = pd.DataFrame.from_dict( { \"cellBC\": [ \"A\", \"A\",", "\"AACCT\", \"AACCT\", \"AAGGG\", ], \"readCount\": [20, 30, 30, 40, 50, 10, 10, 15,", "50, 10, 10, 15, 10, 10, 10], \"Seq\": [ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\",", "\"2\", \"2\"], \"r3\": [\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\",", "\"NA\", ], } ) self.multi_case[\"readName\"] = self.multi_case.apply( lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1", "\"intBC\": [ \"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\", \"TACT\", \"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\",", "\"20\", \"20\", \"20\", ], \"CIGAR\": [ \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\",", "axis=1 ) self.multi_case[\"allele\"] = self.multi_case.apply( lambda x: \"_\".join([x.r1, x.r2, x.r3]), axis=1 ) self.corrections", "= expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist,", "def test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( 
self.multi_case, self.whitelist, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"]", "\"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", ], } )", "\"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"C\", \"C\", \"D\", \"D\", ], \"UMI\": [", "axis=1 ) self.corrections = { \"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\",", "} def test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy()", "self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist(", "} ) self.multi_case[\"readName\"] = self.multi_case.apply( lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"]", "[\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"], \"r2\": [\"2\",", "numpy as np import pandas as pd import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self):", "], } ) self.multi_case[\"readName\"] = self.multi_case.apply( lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1 )", "\"ACTT\", } def test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 ) expected_df =", "\"ACTT\", \"TNNG\": \"TAAG\", \"ANNN\": \"ACTT\", } def test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp,", "\"TAAG\", \"ANNN\": \"ACTT\", } def test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 )", "os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist = [\"ACTT\", \"TAAG\"] 
self.multi_case = pd.DataFrame.from_dict( { \"cellBC\": [ \"A\",", "10], \"Seq\": [ \"AACCTTGG\", \"AACCTTGG\", \"AACCTTCC\", \"AACCTTGG\", \"AACCTTGC\", \"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\",", "setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path = os.path.join(dir_path, \"test_files\") self.whitelist_fp = os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist", ") expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) if __name__", "\"1\", \"1\"], \"r2\": [\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\",", "\"1\", \"1\", \"1\"], \"r2\": [\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\",", "\"AAAA\", ], \"r1\": [\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\",", "x.UMI, str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"] = self.multi_case.apply( lambda x: \"_\".join([x.r1, x.r2, x.r3]), axis=1", "{ \"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\", \"ANNN\": \"ACTT\", } def", "[ \"AACCT\", \"AACCG\", \"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\", ],", "pd import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path = os.path.join(dir_path,", "\"AACCG\", \"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\", ], \"readCount\": [20,", "\"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\", ], \"readCount\": [20, 30, 30, 40, 50, 10, 10,", "\"UMI\": [ \"AACCT\", \"AACCG\", \"AACCC\", \"AACCT\", \"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\",", "\"AACCT\", \"AAGGG\", ], \"readCount\": [20, 30, 30, 40, 50, 10, 10, 15, 10,", 
"expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist, intbc_dist_thresh=1", "= self.multi_case.apply( lambda x: \"_\".join([x.r1, x.r2, x.r3]), axis=1 ) self.corrections = { \"ACTT\":", ") self.corrections = { \"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\", \"ANNN\":", "\"ANNN\": \"ACTT\", } def test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 ) expected_df", "[ \"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\", \"TACT\", \"TAAG\", \"TNNG\", \"ANNN\", \"GCTT\", \"NNNN\", \"AAAA\", ],", "\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"], \"r2\": [\"2\", \"2\", \"2\",", "\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\"], \"r3\": [\"3\", \"3\", \"3\", \"3\", \"3\",", "\"AACCTTCC\", \"AACCTTCG\", \"AACCTCAG\", \"AACCTTGG\", \"AACCTTGG\", \"AACCTAAA\", ], \"intBC\": [ \"ACTT\", \"AAGG\", \"ACTA\", \"AAGN\",", "[\"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\", \"3\"], \"AlignmentScore\": [", "\"1\", \"1\", \"1\", \"1\", \"1\"], \"r2\": [\"2\", \"2\", \"2\", \"2\", \"2\", \"2\", \"2\",", "as pd import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path =", "[20, 30, 30, 40, 50, 10, 10, 15, 10, 10, 10], \"Seq\": [", "\"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\", \"ANNN\": \"ACTT\", } def test_correct(self): df", "intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) if", "\"NA\", \"NA\", ], } ) self.multi_case[\"readName\"] = 
self.multi_case.apply( lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]),", "self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df,", "pandas as pd import cassiopeia class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path", "\"NA\", \"NA\", \"NA\", \"NA\", \"NA\", ], } ) self.multi_case[\"readName\"] = self.multi_case.apply( lambda x:", "import os import unittest import numpy as np import pandas as pd import", "TestErrorCorrectIntBCstoWhitelist(unittest.TestCase): def setUp(self): dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path = os.path.join(dir_path, \"test_files\") self.whitelist_fp = os.path.join(test_files_path,", "\"_\".join([x.r1, x.r2, x.r3]), axis=1 ) self.corrections = { \"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\":", "dir_path = os.path.dirname(os.path.realpath(__file__)) test_files_path = os.path.join(dir_path, \"test_files\") self.whitelist_fp = os.path.join(test_files_path, \"intbc_whitelist.txt\") self.whitelist =", "expected_df = self.multi_case.copy() expected_df[\"intBC\"] = expected_df[\"intBC\"].map(self.corrections) expected_df.dropna(subset=[\"intBC\"], inplace=True) pd.testing.assert_frame_equal(df, expected_df) def test_correct_whitelist_list(self): df", "def test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case, self.whitelist_fp, intbc_dist_thresh=1 ) expected_df = self.multi_case.copy() expected_df[\"intBC\"]", "\"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", \"NA\", ], } ) self.multi_case[\"readName\"] =", "\"ACTT\": \"ACTT\", \"TAAG\": \"TAAG\", \"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\", \"ANNN\": \"ACTT\", } def test_correct(self):", "\"ACTA\": \"ACTT\", \"TNNG\": \"TAAG\", 
\"ANNN\": \"ACTT\", } def test_correct(self): df = cassiopeia.pp.error_correct_intbcs_to_whitelist( self.multi_case,", "\"AACCG\", \"AACCT\", \"AACCG\", \"AAGGA\", \"AACCT\", \"AACCT\", \"AAGGG\", ], \"readCount\": [20, 30, 30, 40,", "x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"] = self.multi_case.apply( lambda x: \"_\".join([x.r1, x.r2,", ") self.multi_case[\"readName\"] = self.multi_case.apply( lambda x: \"_\".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1 ) self.multi_case[\"allele\"] =", "\"r1\": [\"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\", \"1\"], \"r2\":", "\"C\", \"C\", \"C\", \"D\", \"D\", ], \"UMI\": [ \"AACCT\", \"AACCG\", \"AACCC\", \"AACCT\", \"AACCG\",", "\"NA\", \"NA\", \"NA\", ], } ) self.multi_case[\"readName\"] = self.multi_case.apply( lambda x: \"_\".join([x.cellBC, x.UMI," ]
[ "{'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_with_add_operation(self): url =", "self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name':", "Student, Phone class ViewTests(APITestCase): def setUp(self): self.book1 = Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\") self.book2", "\"remove\": [1] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\":", "data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming',", "\"Me\"} ] } ) def test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\") data = { \"name\":", "PUT Tests ********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-detail\", args=[self.student.id]) data = {", "\"Data Structures\", \"code\": \"CS310\", \"books\": { \"update\": { 1: {\"title\": \"React Programming\", \"author\":", "'books': [] }, 'phone_numbers': [] } ) def test_post_with_add_operation(self): url = reverse(\"rcourse-list\") data", "}, 'phone_numbers': [] } ) def test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\") data = {", "from django.urls import reverse from rest_framework.test import APITestCase from tests.testapp.models import Book, Course,", "\"yezy\", \"age\": 33, \"course\": 2 } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data,", "33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [] }", "Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [] } ) def test_post_on_writable_nested_foreignkey_related_field(self): url", "\"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {\"title\": \"Primitive Data Types\", \"author\":", "test_post_with_create_operation(self): data = { \"name\": \"Data Structures\", \"code\": 
\"CS310\", \"books\": {\"create\": [ {\"title\":", "{ 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': []", "data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"create\": [ {\"title\":", "Structures', 'author': 'S.Mobit'} ] } ) def test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data", "\"code\": \"CS310\", \"books\": {\"create\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\",", "\"S.Mobit\"} ] } ) def test_put_with_update_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = {", "'CS50', 'books': [] }, 'phone_numbers': [] } ) def test_post_with_add_operation(self): url = reverse(\"rcourse-list\")", ") def test_put_with_update_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\",", "name=\"Yezy\", age=24, course=self.course1 ) self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\",", "'create': [ {'number': '076750000', 'type': 'office'} ] } } response = self.client.post(url, data,", "args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response =", "] } ) def test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\":", "'type': 'Home', 'student': 1} ] } ) def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\", args=[self.student.id])", "{\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [] } ) def", "\"remove\": [1] } } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, {", "= reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {", "import Book, 
Course, Student, Phone class ViewTests(APITestCase): def setUp(self): self.book1 = Book.objects.create(title=\"Advanced Data", "self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [", "{'number': '073008811', 'type': 'office'} }, 'create': [ {'number': '076750000', 'type': 'office'} ] }", "Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '073008811', 'type': 'office', 'student':", "data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response = self.client.post(url,", "\"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'create': [ {'number': '076750000', 'type': 'office'} ] }", "\"Me\"} ]} } url = reverse(\"wcourse-list\") response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data,", "} ) def test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\":", "\"Programming\", \"code\": \"CS50\"} } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name':", "'update': { 1: {'number': '073008811', 'type': 'office'} }, 'create': [ {'number': '076750000', 'type':", "'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_writable_nested_foreignkey_related_field(self):", "{\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ]} } url", "'code': 'CS50', 'books': [] }, 'phone_numbers': [ {'number': '076750000', 'type': 'office', 'student': 2}", "data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\":", "'office'} ] } } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name':", "url = reverse(\"wcourse-list\") response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data", "\"name\": \"yezy\", \"age\": 
33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": {\"create\": [", "url = reverse(\"rcourse-list\") data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"add\":[1,2]}", "'type': 'Office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } )", "33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"} } response = self.client.put(url, data, format=\"json\") self.assertEqual(", "args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"remove\": [1]", "'CS150', 'books': [ {\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': []", "\"CS50\", \"books\": { \"remove\": [1] } } } response = self.client.put(url, data, format=\"json\")", "= Course.objects.create( name=\"Data Structures\", code=\"CS210\" ) self.course2 = Course.objects.create( name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1,", "reverse(\"wcourse-list\") response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\":", "'type': 'office'} ] } } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, {", "Tests ********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-detail\", args=[self.student.id]) data = { \"name\":", "response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\",", "\"books\": {\"create\": [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ]} } } response =", "Structures', 'author': 'S.Mobit'} ] } ) def test_post_with_create_operation(self): data = { \"name\": \"Data", "\"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data,", "'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 
'books': [] }, 'phone_numbers': [", "'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_many_2_one_relation(self):", ") self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def tearDown(self):", "rest_framework.test import APITestCase from tests.testapp.models import Book, Course, Student, Phone class ViewTests(APITestCase): def", "} response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33,", "Course.objects.create( name=\"Data Structures\", code=\"CS210\" ) self.course2 = Course.objects.create( name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1, self.book2])", "'Home', 'student': 1} ] } ) def test_put_with_add_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data", "= self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': {", "{ 1: {\"title\": \"React Programming\", \"author\": \"M.Json\"} } } } response = self.client.put(url,", "[ {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '076711110',", "Data Structures', 'author': 'S.Mobit'} ] } ) def test_post_with_create_operation(self): data = { \"name\":", "[] } ) def test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\",", "Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # **************** POST Tests ********************* # def test_post_on_pk_nested_foreignkey_related_field(self): url =", "type=\"Home\", student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # **************** POST Tests ********************* #", "{\"name\": \"Programming\", \"code\": \"CS50\"}, 
\"phone_numbers\": { 'update': { 1: {'number': '073008811', 'type': 'office'}", "url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\":", "\"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'create': [", "from tests.testapp.models import Book, Course, Student, Phone class ViewTests(APITestCase): def setUp(self): self.book1 =", "\"code\": \"CS310\", \"books\": {\"add\":[1,2]} } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, {", "reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"add\":", "} ) def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\":", "Book, Course, Student, Phone class ViewTests(APITestCase): def setUp(self): self.book1 = Book.objects.create(title=\"Advanced Data Structures\",", "{\"title\": \"React Programming\", \"author\": \"M.Json\"} ] } ) def test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\",", "APITestCase from tests.testapp.models import Book, Course, Student, Phone class ViewTests(APITestCase): def setUp(self): self.book1", "'Office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def", "Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers':", "'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student':", "'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def test_post_with_create_operation(self): data = {", "{'number': '076750000', 'type': 'office', 'student': 2} ] } ) # **************** PUT Tests", "reverse(\"rstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response =", "33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, 
\"phone_numbers\": { 'create': [ {'number': '076750000', 'type':", "[ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ]} } } response = self.client.post(url, data,", "'create': [ {'number': '076750000', 'type': 'office'} ] } } response = self.client.put(url, data,", "} ) def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\",", "= self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\":", "{\"add\":[1,2]} } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\",", "33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, } response = self.client.post(url, data, format=\"json\") self.assertEqual(", "}, 'phone_numbers': [ {'number': '073008811', 'type': 'office', 'student': 1}, {'number': '073008880', 'type': 'Home',", "data = { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"remove\": [1] }", "{ 'name': 'Programming', 'code': 'CS50', 'books': [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ]", "}, 'phone_numbers': [ {'number': '076750000', 'type': 'office', 'student': 2} ] } ) #", "def test_put_with_create_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\":", "{\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } } response = self.client.put(url, data,", "\"CS50\"} } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age':", "\"code\": \"CS50\"} } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy',", "\"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'},", "\"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": 
\"CS50\"} } response = self.client.put(url, data, format=\"json\")", "{ \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [] } ) def test_put_with_create_operation(self): url", "setUp(self): self.book1 = Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\")", "'Advanced Data Structures', 'author': 'S.Mobit'}, {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] }", "'author': 'S.Mobit'}, {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } ) def test_put_with_update_operation(self):", "'S.Mobit'} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number': '073008880',", "args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"add\": [2]", "test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\":", "] }, 'phone_numbers': [ {'number': '073008811', 'type': 'office', 'student': 1}, {'number': '073008880', 'type':", "'code': 'CS50', 'books': [] }, 'phone_numbers': [] } ) def test_post_with_add_operation(self): url =", "1} ] } ) def test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = {", "{'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '073008811', 'type':", "Structures\", \"code\": \"CS310\", \"books\": {\"add\":[1,2]} } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data,", "'Home', 'student': 1} ] } ) def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data", "reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"create\":", "'name': 'Programming', 'code': 'CS50', 'books': [] }, 
'phone_numbers': [ {'number': '076750000', 'type': 'office',", ") def test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\":", "Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [] } ) def test_post_on_writable_nested_foreignkey_related_field(self): url =", "= { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"remove\": [1] } }", "{'number': '073008811', 'type': 'office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1}, {'number':", "= { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"create\": [ {\"title\": \"Linear Math\",", "Programming\", \"author\": \"M.Json\"} ] } ) def test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data", "{'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ]", "reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"update\":", "\"name\": \"Programming\", \"code\": \"CS50\", \"books\": { \"remove\": [1] } } } response =", "response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course':", "= { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, } response", "Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\",", "\"code\": \"CS410\", \"books\": [] } ) def test_put_with_create_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data", "\"S.Mobit\"} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number': '073008880',", "\"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ] } ) def", "def test_post_on_pk_nested_foreignkey_related_field(self): 
url = reverse(\"rstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\":", "{'number': '076711110', 'type': 'Office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ]", "= { \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\":", "{\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'create': [ {'number': '076750000', 'type': 'office'} ]", "'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def test_put_with_remove_operation(self): url", "Tricks\", \"author\": \"<NAME>\"} ]} } } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data,", "{ \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": {", "[ {\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [ {'number': '076711110',", "= { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"update\": { 1: {\"title\":", "format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"React", "test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": {", "'CS50', 'books': [] }, 'phone_numbers': [ {'number': '076750000', 'type': 'office', 'student': 2} ]", "\"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\":", "'076750000', 'type': 'office', 'student': 2} ] } ) # **************** PUT Tests *********************", "Course, Student, Phone class ViewTests(APITestCase): def setUp(self): self.book1 = Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\")", "\"age\": 33, \"course\": 2 } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, {", "'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 
'S.Mobit'} ] } ) def test_post_with_create_operation(self):", "Programming\", \"author\": \"M.Json\"} } } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data,", "'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def test_post_with_create_operation(self): data", "args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"update\": {", "'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number':", "'name': 'Programming', 'code': 'CS50', 'books': [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ] },", "self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [] } ) def", "{ \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"add\": [2] } } response", "Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ]} } url = reverse(\"wcourse-list\")", ") def test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\",", "\"React Programming\", \"author\": \"M.Json\"} } } } response = self.client.put(url, data, format=\"json\") self.assertEqual(", "} } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data", "data = { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"add\": [2] }", "33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [ {'number':", "'phone_numbers': [] } ) def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\") data = { \"name\":", "'type': 'Home', 'student': 1} ] } ) def test_put_with_add_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id])", "= reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\",", 
"test_put_with_add_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS410\",", "'type': 'office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1}, {'number': '076750000', 'type':", "Types\", \"author\": \"S.Mobit\"} ] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data,", ") def test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\":", "reverse(\"rcourse-list\") data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"add\":[1,2]} } response", "} ) def test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\":", "{\"title\": \"Algebra Three\", \"author\": \"Me\"} ]} } url = reverse(\"wcourse-list\") response = self.client.post(url,", "response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\",", "\"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"} } response = self.client.put(url,", "def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33,", "} ) def test_post_with_add_operation(self): url = reverse(\"rcourse-list\") data = { \"name\": \"Data Structures\",", "student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # **************** POST Tests ********************* # def", "url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\":", "\"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, } response = self.client.post(url, data, format=\"json\")", "Data Structures', 'author': 'S.Mobit'}, 
{'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] } )", "1: {'number': '073008811', 'type': 'office'} }, 'create': [ {'number': '076750000', 'type': 'office'} ]", "\"Data Structures\", \"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title':", "\"M.Json\"} } } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\":", "{ \"create\": [ {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } } response", "def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # **************** POST Tests ********************* # def test_post_on_pk_nested_foreignkey_related_field(self):", "2} ] } ) # **************** PUT Tests ********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url", "# **************** POST Tests ********************* # def test_post_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-list\") data =", "'course': { 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [ {'number': '076750000',", "\"Programming\", \"code\": \"CS50\", \"books\": { \"remove\": [1] } } } response = self.client.put(url,", "'076750000', 'type': 'office'} ] } } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data,", "{ 'name': 'Programming', 'code': 'CS150', 'books': [ {\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"}", "\"age\": 33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": { \"remove\": [1] }", "response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course':", "Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() 
Student.objects.all().delete()", "\"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [] } ) def test_post_on_writable_nested_foreignkey_related_field(self):", "= reverse(\"rstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2 }", "'073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\",", "= Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\") self.course1 = Course.objects.create( name=\"Data Structures\", code=\"CS210\" ) self.course2", "{ \"update\": { 1: {\"title\": \"React Programming\", \"author\": \"M.Json\"} } } } response", "self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # **************** POST", "def test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33,", "\"phone_numbers\": { 'update': { 1: {'number': '073008811', 'type': 'office'} }, 'create': [ {'number':", "= reverse(\"rstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response", "'course': { 'name': 'Programming', 'code': 'CS150', 'books': [ {\"title\": \"Advanced Data Structures\", \"author\":", "********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\",", "response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books':", "{ \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"React Programming\", \"author\": \"M.Json\"}", "\"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {\"title\": \"Primitive Data", "1} ] } ) def 
test_put_with_add_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = {", "\"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"create\": [ {\"title\": \"Primitive Data Types\",", "] } ) # **************** PUT Tests ********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url =", "\"Algebra Three\", \"author\": \"Me\"} ] } ) def test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\") data", "\"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": { \"remove\": [1]", "'S.Mobit'}, {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } ) def test_put_with_update_operation(self): url", "args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"create\": [", "] } ) def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\":", "] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy',", ") def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33,", "2 } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age':", "ViewTests(APITestCase): def setUp(self): self.book1 = Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic Data", "\"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'create': [ {'number': '076750000', 'type': 'office'}", "'type': 'office', 'student': 2} ] } ) # **************** PUT Tests ********************* #", "'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Advanced", "self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data 
Structures\", \"code\": \"CS310\", \"books\": [", "url = reverse(\"rstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2 }", ") # **************** PUT Tests ********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-detail\", args=[self.student.id])", "\"books\": { \"update\": { 1: {\"title\": \"React Programming\", \"author\": \"M.Json\"} } } }", "'student': 1} ] } ) def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data =", "Structures\", \"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic", "{ \"remove\": [1] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, {", "Student.objects.create( name=\"Yezy\", age=24, course=self.course1 ) self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\",", "self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50',", "'code': 'CS50', 'books': [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data", "33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": { \"remove\": [1] } }", "Three\", \"author\": \"Me\"} ] } ) def test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\") data =", "def test_put_with_add_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\":", "********************* # def test_post_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-list\") data = { \"name\": \"yezy\", \"age\":", "= self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': {", "= { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"create\": 
[ {\"title\": \"Primitive", "{ 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [ {'number': '076750000', 'type':", "data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, }", "{ \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"create\": [ {\"title\": \"Primitive Data", ") def test_put_with_create_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\",", "\"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ] } ) def test_post_on_deep_nested_fields(self): url =", "\"<NAME>\"} ] }, 'phone_numbers': [] } ) def test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\") data", "\"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ]} } url =", "data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {'title':", "1} ] } ) def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = {", "\"code\": \"CS50\"}, \"phone_numbers\": { 'create': [ {'number': '076750000', 'type': 'office'} ] } }", "Data Structures', 'author': 'S.Mobit'} ] } ) def test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id])", "\"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"React Programming\", \"author\": \"M.Json\"} ] }", "format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {'title': 'Advanced", "def test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\":", "{ 1: {'number': '073008811', 'type': 'office'} }, 'create': [ {'number': '076750000', 'type': 'office'}", "\"Algebra Three\", \"author\": \"Me\"} ]} } url = reverse(\"wcourse-list\") response = 
self.client.post(url, data,", "'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ]", "= { \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response = self.client.post(url, data,", "{'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def test_put_with_remove_operation(self): url =", "'student': 1} ] } ) def test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data =", "\"age\": 33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": {\"create\": [ {\"title\": \"Python", ") self.course2 = Course.objects.create( name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student = Student.objects.create(", "\"author\": \"S.Mobit\"} ] }, 'phone_numbers': [] } ) def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\")", "'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [] },", "{ 'create': [ {'number': '076750000', 'type': 'office'} ] } } response = self.client.post(url,", "'076711110', 'type': 'Office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] }", "Structures\", \"code\": \"CS410\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic", "Data Structures\", author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\") self.course1 = Course.objects.create( name=\"Data", "Three\", \"author\": \"Me\"} ]} } url = reverse(\"wcourse-list\") response = self.client.post(url, data, format=\"json\")", "33, 'course': { 'name': 'Programming', 'code': 'CS150', 'books': [ {\"title\": \"Advanced Data Structures\",", "'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office',", "} response = self.client.put(url, data, format=\"json\") 
self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\":", "= reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": {", "'S.Mobit'} ] } ) def test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = {", "args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\", \"code\":", "self.course1 = Course.objects.create( name=\"Data Structures\", code=\"CS210\" ) self.course2 = Course.objects.create( name=\"Programming\", code=\"CS150\" )", "Course.objects.all().delete() Student.objects.all().delete() # **************** POST Tests ********************* # def test_post_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-list\")", "\"author\": \"S.Mobit\"} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number':", "data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"} }", "} } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age':", "format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [] } )", "\"books\": [ {\"title\": \"React Programming\", \"author\": \"M.Json\"} ] } ) def test_put_on_deep_nested_fields(self): url", "1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_with_add_operation(self): url", "\"<NAME>\"} ]} } } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name':", "]} } url = reverse(\"wcourse-list\") response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, {", "\"name\": \"yezy\", \"age\": 33, \"course\": 2 } response = self.client.put(url, data, format=\"json\") self.assertEqual(", "'student': 2} ] } ) # **************** PUT Tests ********************* # def 
test_put_on_pk_nested_foreignkey_related_field(self):", "\"Python Tricks\", \"author\": \"<NAME>\"} ]} } } response = self.client.post(url, data, format=\"json\") self.assertEqual(", "\"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"React Programming\", \"author\": \"M.Json\"} ]", "Structures\", \"code\": \"CS310\", \"books\": {\"create\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra", "'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Basic", "class ViewTests(APITestCase): def setUp(self): self.book1 = Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic", "name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student = Student.objects.create( name=\"Yezy\", age=24, course=self.course1 )", "= { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"add\":[1,2]} } response = self.client.post(url,", "response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [] } ) def test_put_with_create_operation(self):", "} ) def test_post_with_create_operation(self): data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\":", "Structures\", author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\") self.course1 = Course.objects.create( name=\"Data Structures\",", "\"code\": \"CS310\", \"books\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\":", "self.course2.books.set([self.book1]) self.student = Student.objects.create( name=\"Yezy\", age=24, course=self.course1 ) self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student)", "'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\",", 
"self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [ {'title': 'Advanced Data", "= Student.objects.create( name=\"Yezy\", age=24, course=self.course1 ) self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2 =", "\"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": {\"create\": [ {\"title\":", "'S.Mobit'} ] }, 'phone_numbers': [ {'number': '073008811', 'type': 'office', 'student': 1}, {'number': '073008880',", "'phone_numbers': [] } ) def test_post_with_add_operation(self): url = reverse(\"rcourse-list\") data = { \"name\":", "= reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\":", "'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Basic Data", "format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code':", "\"books\": { \"add\": [2] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data,", "[ {\"title\": \"React Programming\", \"author\": \"M.Json\"} ] } ) def test_put_on_deep_nested_fields(self): url =", "\"books\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ]", "\"create\": [ {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } } response =", "{ \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response = self.client.post(url, data, format=\"json\")", "url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": {", "POST Tests ********************* # def test_post_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-list\") data = { \"name\":", "{ \"name\": \"Programming\", \"code\": \"CS50\", \"books\": {\"create\": [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"}", "[ 
{\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ] }", "} } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy',", "'CS50', 'books': [ {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [", "33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Advanced Data Structures',", "reverse from rest_framework.test import APITestCase from tests.testapp.models import Book, Course, Student, Phone class", "Structures', 'author': 'S.Mobit'}, {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } ) def", "**************** POST Tests ********************* # def test_post_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-list\") data = {", "'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers':", "format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"Linear", "{ 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'},", "'CS50', 'books': [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures',", "[ {'number': '076750000', 'type': 'office', 'student': 2} ] } ) # **************** PUT", "test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\":", "'type': 'Home', 'student': 1}, {'number': '076750000', 'type': 'office', 'student': 1} ] } )", "Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ] } ) def test_post_on_deep_nested_fields(self):", "'books': [ {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number':", "\"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ]} } url = reverse(\"wcourse-list\") response", 
"tests.testapp.models import Book, Course, Student, Phone class ViewTests(APITestCase): def setUp(self): self.book1 = Book.objects.create(title=\"Advanced", "\"code\": \"CS410\", \"books\": { \"add\": [2] } } response = self.client.put(url, data, format=\"json\")", "**************** PUT Tests ********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-detail\", args=[self.student.id]) data =", "Structures\", \"code\": \"CS410\", \"books\": [] } ) def test_put_with_create_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id])", "\"code\": \"CS310\", \"books\": [ {\"title\": \"React Programming\", \"author\": \"M.Json\"} ] } ) def", "= Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\") self.course1 =", "# def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\":", "\"age\": 33, \"course\": 2 } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, {", "Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1},", "Phone class ViewTests(APITestCase): def setUp(self): self.book1 = Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\") self.book2 =", "data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"add\":[1,2]} } response =", "{'number': '073008880', 'type': 'Home', 'student': 1}, {'number': '076750000', 'type': 'office', 'student': 1} ]", "\"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"add\":[1,2]} } response = self.client.post(url, data, format=\"json\")", "'073008880', 'type': 'Home', 'student': 1}, {'number': '076750000', 'type': 'office', 'student': 1} ] }", "test_put_with_update_operation(self): url = reverse(\"wcourse-detail\", 
args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS310\",", "Data Types\", \"author\": \"S.Mobit\"} ] } ) def test_put_with_update_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id])", "'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Basic Data Structures', 'author':", "{\"title\": \"React Programming\", \"author\": \"M.Json\"} } } } response = self.client.put(url, data, format=\"json\")", "data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [] }", "\"author\": \"Me\"} ] } ) def test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\") data = {", "reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"},", "= self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\":", "data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response = self.client.put(url,", "def setUp(self): self.book1 = Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic Data Structures\",", "{'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def test_post_with_create_operation(self): data =", "'age': 33, 'course': { 'name': 'Programming', 'code': 'CS150', 'books': [ {\"title\": \"Advanced Data", "\"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ]} } url = reverse(\"wcourse-list\") response =", "]} } } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy',", "'type': 'Home', 'student': 1} ] } ) def test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\", args=[self.student.id])", "self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student = Student.objects.create( name=\"Yezy\", age=24, 
course=self.course1 ) self.phone1 = Phone.objects.create(number=\"076711110\",", "Structures\", code=\"CS210\" ) self.course2 = Course.objects.create( name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student", "code=\"CS210\" ) self.course2 = Course.objects.create( name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student =", "url = reverse(\"rstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2", "Types\", \"author\": \"S.Mobit\"} ] } ) def test_put_with_update_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data", "'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {\"title\":", "\"CS310\", \"books\": [ {\"title\": \"React Programming\", \"author\": \"M.Json\"} ] } ) def test_put_on_deep_nested_fields(self):", "[ {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } } response = self.client.put(url,", "\"Data Structures\", \"code\": \"CS410\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title':", "'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }", "def test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\":", "'073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\",", "'code': 'CS50', 'books': [ {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers':", "response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS150', 'books':", "'phone_numbers': [] } ) def test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\") data = { \"name\":", "}, 'phone_numbers': [] } ) def 
test_post_with_add_operation(self): url = reverse(\"rcourse-list\") data = {", "response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data Structures',", "} response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33,", "'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title':", "def test_put_with_update_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\":", "\"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'update': { 1: {'number': '073008811', 'type': 'office'} },", "'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title':", "url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\":", "Structures\", \"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {\"title\": \"Primitive", "\"Data Structures\", \"code\": \"CS310\", \"books\": {\"add\":[1,2]} } response = self.client.post(url, data, format=\"json\") self.assertEqual(", "'office', 'student': 2} ] } ) # **************** PUT Tests ********************* # def", "django.urls import reverse from rest_framework.test import APITestCase from tests.testapp.models import Book, Course, Student,", "\"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": {\"create\": [ {\"title\": \"Python Tricks\", \"author\":", "'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {\"title\": \"Python Tricks\",", "self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS150',", "\"Data Structures\", \"code\": \"CS410\", \"books\": [] } ) def test_put_with_create_operation(self): url = reverse(\"wcourse-detail\",", "[1] } } } response = self.client.put(url, 
data, format=\"json\") self.assertEqual( response.data, { 'name':", "\"CS310\", \"books\": {\"add\":[1,2]} } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\":", ") def test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33,", "'office'} }, 'create': [ {'number': '076750000', 'type': 'office'} ] } } response =", "{ 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [] } ) def", "\"yezy\", \"age\": 33, \"course\": 2 } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data,", "'office'} ] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name':", ") def test_post_with_add_operation(self): url = reverse(\"rcourse-list\") data = { \"name\": \"Data Structures\", \"code\":", "\"author\": \"<NAME>\"} ]} } } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, {", "} ) def test_put_with_update_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data", "test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\":", "{\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } ) def test_put_with_update_operation(self): url =", "def test_post_with_add_operation(self): url = reverse(\"rcourse-list\") data = { \"name\": \"Data Structures\", \"code\": \"CS310\",", "'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] },", ") def test_put_with_add_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\",", "import reverse from rest_framework.test import APITestCase from tests.testapp.models import Book, Course, Student, Phone", "{ \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"remove\": 
[1] } } response", "reverse(\"rstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response", "self.student = Student.objects.create( name=\"Yezy\", age=24, course=self.course1 ) self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2", "'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def test_put_with_remove_operation(self):", "test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\":", "] } ) def test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\":", "33, \"course\": 2 } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name':", "\"add\": [2] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\":", "1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_many_2_one_relation(self): url", "}, 'create': [ {'number': '076750000', 'type': 'office'} ] } } response = self.client.put(url,", "reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\", \"code\":", "[ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ] }, 'phone_numbers': [] } ) def", "= { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": {", "\"phone_numbers\": { 'create': [ {'number': '076750000', 'type': 'office'} ] } } response =", "{ \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'update':", "= { \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response = self.client.put(url, data,", "} url = reverse(\"wcourse-list\") response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 
\"name\":", "= self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\":", "'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS150', 'books': [ {\"title\":", "[] }, 'phone_numbers': [] } ) def test_post_with_add_operation(self): url = reverse(\"rcourse-list\") data =", "\"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"} } response = self.client.put(url, data,", "from rest_framework.test import APITestCase from tests.testapp.models import Book, Course, Student, Phone class ViewTests(APITestCase):", ") def test_post_with_create_operation(self): data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"create\":", "data = { \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\",", "test_post_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": 2", "'books': [ {\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [] }", "\"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"update\": { 1: {\"title\": \"React Programming\",", "\"code\": \"CS50\"}, \"phone_numbers\": { 'update': { 1: {'number': '073008811', 'type': 'office'} }, 'create':", "# **************** PUT Tests ********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-detail\", args=[self.student.id]) data", "data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\":", "\"course\": 2 } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy',", "reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\",", "[ {'number': '076711110', 'type': 'Office', 
'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1}", "1}, {'number': '073008880', 'type': 'Home', 'student': 1}, {'number': '076750000', 'type': 'office', 'student': 1}", "\"React Programming\", \"author\": \"M.Json\"} ] } ) def test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\", args=[self.student.id])", "'books': [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author':", "'Programming', 'code': 'CS50', 'books': [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ] }, 'phone_numbers':", "Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] } ) def", "] }, 'phone_numbers': [] } ) def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\") data =", "self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": []", "url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\":", "'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [ {'number': '076750000', 'type': 'office', 'student':", "{ \"name\": \"yezy\", \"age\": 33, \"course\": 2 } response = self.client.put(url, data, format=\"json\")", "'Programming', 'code': 'CS50', 'books': [ {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] },", "def test_post_with_create_operation(self): data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"create\": [", "self.book2]) self.course2.books.set([self.book1]) self.student = Student.objects.create( name=\"Yezy\", age=24, course=self.course1 ) self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\",", "\"CS310\", \"books\": {\"create\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\":", "{\"name\": \"Programming\", \"code\": \"CS50\"}, } response = 
self.client.post(url, data, format=\"json\") self.assertEqual( response.data, {", "response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [ {'title': 'Advanced Data Structures',", "\"CS310\", \"books\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"}", "import APITestCase from tests.testapp.models import Book, Course, Student, Phone class ViewTests(APITestCase): def setUp(self):", "= reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\":", "'phone_numbers': [ {'number': '076750000', 'type': 'office', 'student': 2} ] } ) # ****************", "\"books\": {\"create\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"}", "\"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } ) def test_put_with_update_operation(self): url = reverse(\"wcourse-detail\",", "{ 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Basic Data Structures', 'author': 'S.Mobit'}", "'phone_numbers': [ {'number': '073008811', 'type': 'office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student':", "\"CS310\", \"books\": { \"update\": { 1: {\"title\": \"React Programming\", \"author\": \"M.Json\"} } }", "response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"Linear Math\", \"author\":", "self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age': 33, 'course': { 'name':", "\"code\": \"CS310\", \"books\": { \"create\": [ {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ]", ") self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student = Student.objects.create( name=\"Yezy\", age=24, course=self.course1 ) self.phone1 =", "}, 'phone_numbers': [] } ) def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\") data = 
{", "Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # **************** POST Tests *********************", "'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Advanced Data", "{\"title\": \"Algebra Three\", \"author\": \"Me\"} ] } ) def test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\")", "{\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ]} } } response = self.client.post(url, data, format=\"json\")", "Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '073008811', 'type': 'office', 'student': 1},", "Structures\", \"code\": \"CS410\", \"books\": { \"remove\": [1] } } response = self.client.put(url, data,", "{'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_writable_nested_foreignkey_related_field(self): url =", "args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}", "reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"remove\":", "self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"Linear Math\",", "{ \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"} } response =", "\"Python Tricks\", \"author\": \"<NAME>\"} ] }, 'phone_numbers': [] } ) def test_post_on_many_2_one_relation(self): url", "Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"React Programming\", \"author\": \"M.Json\"} ] } )", "33, \"course\": 2 } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name':", "= { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"} } response", "Structures\", \"code\": \"CS310\", \"books\": { 
\"create\": [ {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"}", "= { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"add\": [2] } }", "{\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ] }, 'phone_numbers': [] } ) def test_post_on_many_2_one_relation(self):", "'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '073008811',", "\"author\": \"M.Json\"} ] } ) def test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data =", "'type': 'office'} }, 'create': [ {'number': '076750000', 'type': 'office'} ] } } response", "{\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ] } )", "} ) def test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\",", "args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"},", "def test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33,", "reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\":", "= reverse(\"rcourse-list\") data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"add\":[1,2]} }", "33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": {\"create\": [ {\"title\": \"Python Tricks\",", "= Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # **************** POST Tests", "Structures\", author=\"S.Mobit\") self.course1 = Course.objects.create( name=\"Data Structures\", code=\"CS210\" ) self.course2 = Course.objects.create( name=\"Programming\",", "[ {\"title\": \"Advanced 
Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [] } )", "\"CS410\", \"books\": { \"add\": [2] } } response = self.client.put(url, data, format=\"json\") self.assertEqual(", "] } ) def test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\":", "\"CS50\", \"books\": {\"create\": [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ]} } } response", "'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS150', 'books': [ {\"title\": \"Advanced", "tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # **************** POST Tests ********************* # def test_post_on_pk_nested_foreignkey_related_field(self): url", "{ \"name\": \"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": {\"create\":", "'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Advanced Data Structures', 'author':", "test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS410\",", "\"books\": { \"remove\": [1] } } } response = self.client.put(url, data, format=\"json\") self.assertEqual(", "'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': []", "[] }, 'phone_numbers': [ {'number': '076750000', 'type': 'office', 'student': 2} ] } )", "'course': { 'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [] } )", "'CS50', 'books': [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ] }, 'phone_numbers': [] }", "{ \"add\": [2] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, {", "url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\",", "\"name\": \"yezy\", \"age\": 33, \"course\": 2 } response = self.client.post(url, data, format=\"json\") 
self.assertEqual(", "[1] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data", "\"name\": \"yezy\", \"age\": 33, \"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": { \"remove\":", "'Home', 'student': 1} ] } ) def test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data", "\"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'update': {", "\"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {\"title\": \"Primitive Data Types\",", "'073008811', 'type': 'office'} }, 'create': [ {'number': '076750000', 'type': 'office'} ] } }", "'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '073008811', 'type': 'office', 'student': 1}, {'number':", "[2] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data", "def test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\":", "'author': 'S.Mobit'} ] } ) def test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data =", "] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data", "\"code\": \"CS50\", \"books\": { \"remove\": [1] } } } response = self.client.put(url, data,", "test_post_with_add_operation(self): url = reverse(\"rcourse-list\") data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\":", "{ 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS150', 'books': [", "Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student':", "33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {'title': 'Basic Data Structures',", "data = { \"name\": \"Data Structures\", \"code\": \"CS310\", 
\"books\": { \"update\": { 1:", "self.book1 = Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\") self.course1", "\"CS50\"}, \"phone_numbers\": { 'create': [ {'number': '076750000', 'type': 'office'} ] } } response", "\"CS50\"}, \"phone_numbers\": { 'update': { 1: {'number': '073008811', 'type': 'office'} }, 'create': [", "Structures\", \"code\": \"CS310\", \"books\": { \"update\": { 1: {\"title\": \"React Programming\", \"author\": \"M.Json\"}", "\"Programming\", \"code\": \"CS50\"}, } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name':", "] }, 'phone_numbers': [] } ) def test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\") data =", "{ \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, } response =", "{ \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": { \"update\": { 1: {\"title\": \"React", "\"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"remove\": [1] } } response =", "] } ) def test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\",", "'code': 'CS50', 'books': [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ] }, 'phone_numbers': []", "type=\"Office\", student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() #", "\"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"create\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"},", "2 } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age':", "\"Data Structures\", \"code\": \"CS410\", \"books\": { \"add\": [2] } } response = self.client.put(url,", "} } response = 
self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\",", "self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [", "'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '076711110',", "\"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'update': { 1:", "\"course\": {\"name\": \"Programming\", \"code\": \"CS50\"} } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data,", "\"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'create': [ {'number':", "\"code\": \"CS50\", \"books\": {\"create\": [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ]} } }", "\"S.Mobit\"} ] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\":", "name=\"Data Structures\", code=\"CS210\" ) self.course2 = Course.objects.create( name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1])", "code=\"CS150\" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student = Student.objects.create( name=\"Yezy\", age=24, course=self.course1 ) self.phone1", "self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def tearDown(self): Book.objects.all().delete()", "= reverse(\"wcourse-list\") response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\",", "self.course2 = Course.objects.create( name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student 
= Student.objects.create( name=\"Yezy\",", "\"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office',", "= reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\",", "\"name\": \"Programming\", \"code\": \"CS50\", \"books\": {\"create\": [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ]}", "'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '073008811', 'type': 'office',", "\"CS410\", \"books\": [] } ) def test_put_with_create_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data =", "\"books\": { \"create\": [ {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } }", "\"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'},", "'Programming', 'code': 'CS50', 'books': [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic", "] } } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy',", "{ \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [ {'title': 'Advanced Data Structures', 'author':", "'name': 'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [] } ) def test_post_with_add_operation(self):", "Course.objects.create( name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student = Student.objects.create( name=\"Yezy\", age=24, course=self.course1", "'author': 'S.Mobit'} ] } ) def test_post_with_create_operation(self): data = { \"name\": \"Data Structures\",", "'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {\"title\": \"Python", "author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\") self.course1 = Course.objects.create( 
name=\"Data Structures\", code=\"CS210\"", "'student': 1} ] } ) def test_put_with_add_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data =", "] } ) def test_put_with_add_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\":", "'Programming', 'code': 'CS150', 'books': [ {\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] },", "\"code\": \"CS50\"}, } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy',", "\"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra", "33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {\"title\": \"Python Tricks\", \"author\":", "response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\",", "\"course\": 2 } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy',", "student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete() Student.objects.all().delete() # ****************", "\"author\": \"S.Mobit\"} ] } ) def test_put_with_update_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data =", "Tests ********************* # def test_post_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-list\") data = { \"name\": \"yezy\",", "self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data", "] } ) def test_post_with_create_operation(self): data = { \"name\": \"Data Structures\", \"code\": \"CS310\",", "format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [ {'title': 'Advanced", ") def 
test_post_on_deep_nested_fields(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33,", "{\"create\": [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ]} } } response = self.client.post(url,", "def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\", \"age\": 33, \"course\":", "[ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"}", "[ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'}", "{ \"remove\": [1] } } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data,", "[] } ) def test_put_with_create_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\":", "'name': 'Programming', 'code': 'CS150', 'books': [ {\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ]", "\"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data", "Book.objects.create(title=\"Advanced Data Structures\", author=\"S.Mobit\") self.book2 = Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\") self.course1 = Course.objects.create(", "[ {'number': '076750000', 'type': 'office'} ] } } response = self.client.put(url, data, format=\"json\")", "\"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, } response = self.client.post(url, data,", "self.book2 = Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\") self.course1 = Course.objects.create( name=\"Data Structures\", code=\"CS210\" )", "{ \"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'create':", "] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number': '073008880', 'type':", "Data Structures\", 
author=\"S.Mobit\") self.course1 = Course.objects.create( name=\"Data Structures\", code=\"CS210\" ) self.course2 = Course.objects.create(", "} ) def test_put_with_add_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data", "\"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": { \"add\": [2] } } response =", "\"M.Json\"} ] } ) def test_put_on_deep_nested_fields(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = {", "{ \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"create\": [ {\"title\": \"Linear Math\", \"author\":", "} ) def test_put_on_many_2_one_relation(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\",", "# def test_post_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-list\") data = { \"name\": \"yezy\", \"age\": 33,", "\"CS410\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures',", "\"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author':", "'Programming', 'code': 'CS50', 'books': [] }, 'phone_numbers': [] } ) def test_post_with_add_operation(self): url", "{ \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"},", "{\"create\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ]}", "\"CS410\", \"books\": { \"remove\": [1] } } response = self.client.put(url, data, format=\"json\") self.assertEqual(", "def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33,", "'073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_with_add_operation(self): url = reverse(\"rcourse-detail\",", "}, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number': 
'073008880', 'type': 'Home',", "'course': { 'name': 'Programming', 'code': 'CS50', 'books': [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"}", "\"CS50\"}, } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age':", "age=24, course=self.course1 ) self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student)", "\"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } } response = self.client.put(url, data, format=\"json\")", "33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'update': { 1: {'number': '073008811',", "{ \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author':", "Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [", "'076750000', 'type': 'office'} ] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data,", "\"books\": {\"add\":[1,2]} } response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data", "'books': [ {\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [ {'number':", "test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\":", "] } ) def test_put_with_update_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\":", "\"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'create': [ {'number': '076750000',", "self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"React Programming\",", "Data Structures', 'author': 'S.Mobit'} ] }, 
'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student':", "\"Data Structures\", \"code\": \"CS310\", \"books\": { \"create\": [ {\"title\": \"Primitive Data Types\", \"author\":", "{'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ]", "'073008811', 'type': 'office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1}, {'number': '076750000',", "} ) def test_put_with_remove_operation(self): url = reverse(\"rcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data", "[ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ]} }", "} ) # **************** PUT Tests ********************* # def test_put_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-detail\",", "{'number': '076750000', 'type': 'office'} ] } } response = self.client.post(url, data, format=\"json\") self.assertEqual(", "'code': 'CS150', 'books': [ {\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers':", "\"code\": \"CS410\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data", "response.data, { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": [ {\"title\": \"React Programming\", \"author\":", "= Course.objects.create( name=\"Programming\", code=\"CS150\" ) self.course1.books.set([self.book1, self.book2]) self.course2.books.set([self.book1]) self.student = Student.objects.create( name=\"Yezy\", age=24,", "Tricks\", \"author\": \"<NAME>\"} ] }, 'phone_numbers': [] } ) def test_post_on_many_2_one_relation(self): url =", "course=self.course1 ) self.phone1 = Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def", "Student.objects.all().delete() # **************** POST Tests ********************* # def 
test_post_on_pk_nested_foreignkey_related_field(self): url = reverse(\"rstudent-list\") data", "Structures\", \"code\": \"CS410\", \"books\": { \"add\": [2] } } response = self.client.put(url, data,", "'books': [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ] }, 'phone_numbers': [] } )", "\"author\": \"Me\"}, {\"title\": \"Algebra Three\", \"author\": \"Me\"} ] } ) def test_post_on_deep_nested_fields(self): url", ") def test_put_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\":", "{ \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"add\":[1,2]} } response = self.client.post(url, data,", "'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_with_add_operation(self):", "\"code\": \"CS410\", \"books\": { \"remove\": [1] } } response = self.client.put(url, data, format=\"json\")", "\"code\": \"CS310\", \"books\": { \"update\": { 1: {\"title\": \"React Programming\", \"author\": \"M.Json\"} }", "\"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {'title': 'Basic Data Structures',", "\"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'update': { 1: {'number':", "\"author\": \"<NAME>\"} ] }, 'phone_numbers': [] } ) def test_post_on_many_2_one_relation(self): url = reverse(\"wstudent-list\")", "} response = self.client.post(url, data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\":", "\"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [] } ) def test_put_with_create_operation(self): url =", "1: {\"title\": \"React Programming\", \"author\": \"M.Json\"} } } } response = self.client.put(url, data,", "\"books\": [] } ) def test_put_with_create_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = {", "test_put_on_pk_nested_foreignkey_related_field(self): url = 
reverse(\"rstudent-detail\", args=[self.student.id]) data = { \"name\": \"yezy\", \"age\": 33, \"course\":", "\"CS310\", \"books\": { \"create\": [ {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] }", "{ 'name': 'yezy', 'age': 33, 'course': { 'name': 'Programming', 'code': 'CS50', 'books': [", "\"Programming\", \"code\": \"CS50\", \"books\": {\"create\": [ {\"title\": \"Python Tricks\", \"author\": \"<NAME>\"} ]} }", "[ {'number': '076750000', 'type': 'office'} ] } } response = self.client.post(url, data, format=\"json\")", "Data Structures', 'author': 'S.Mobit'}, {\"title\": \"Primitive Data Types\", \"author\": \"S.Mobit\"} ] } )", "'CS150', 'books': [ {\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [", "= Phone.objects.create(number=\"076711110\", type=\"Office\", student=self.student) self.phone2 = Phone.objects.create(number=\"073008880\", type=\"Home\", student=self.student) def tearDown(self): Book.objects.all().delete() Course.objects.all().delete()", "\"author\": \"S.Mobit\"} ] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, {", "{\"title\": \"Advanced Data Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [ {'number': '076711110', 'type':", "data, format=\"json\") self.assertEqual( response.data, { \"name\": \"Data Structures\", \"code\": \"CS410\", \"books\": [ {'title':", "{'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_many_2_one_relation(self): url =", "{ 'update': { 1: {'number': '073008811', 'type': 'office'} }, 'create': [ {'number': '076750000',", "\"author\": \"Me\"} ]} } url = reverse(\"wcourse-list\") response = self.client.post(url, data, format=\"json\") self.assertEqual(", "'books': [] }, 'phone_numbers': [ {'number': '076750000', 'type': 'office', 'student': 2} ] }", "\"update\": { 1: {\"title\": \"React Programming\", \"author\": \"M.Json\"} } } } response =", "test_put_with_create_operation(self): url 
= reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data Structures\", \"code\": \"CS310\",", "\"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, \"phone_numbers\": { 'update': { 1: {'number': '073008811', 'type':", "Data Types\", \"author\": \"S.Mobit\"} ] } } response = self.client.put(url, data, format=\"json\") self.assertEqual(", "Structures\", \"author\": \"S.Mobit\"} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1},", "[ {'number': '073008811', 'type': 'office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1},", "\"Data Structures\", \"code\": \"CS310\", \"books\": [ {'title': 'Advanced Data Structures', 'author': 'S.Mobit'}, {\"title\":", "author=\"S.Mobit\") self.course1 = Course.objects.create( name=\"Data Structures\", code=\"CS210\" ) self.course2 = Course.objects.create( name=\"Programming\", code=\"CS150\"", "'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '076711110', 'type': 'Office', 'student': 1}, {'number':", "[] } ) def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\") data = { \"name\": \"yezy\",", "{\"name\": \"Programming\", \"code\": \"CS50\"} } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, {", "\"Data Structures\", \"code\": \"CS310\", \"books\": {\"create\": [ {\"title\": \"Linear Math\", \"author\": \"Me\"}, {\"title\":", "\"S.Mobit\"} ] }, 'phone_numbers': [] } ) def test_post_on_writable_nested_foreignkey_related_field(self): url = reverse(\"wstudent-list\") data", "{'title': 'Basic Data Structures', 'author': 'S.Mobit'} ] }, 'phone_numbers': [ {'number': '076711110', 'type':", "\"course\": { \"name\": \"Programming\", \"code\": \"CS50\", \"books\": { \"remove\": [1] } } }", "'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1}, {'number': '076750000', 'type': 'office', 'student':", "\"author\": \"M.Json\"} } } } response = self.client.put(url, 
data, format=\"json\") self.assertEqual( response.data, {", "'office', 'student': 1}, {'number': '073008880', 'type': 'Home', 'student': 1}, {'number': '076750000', 'type': 'office',", "Book.objects.create(title=\"Basic Data Structures\", author=\"S.Mobit\") self.course1 = Course.objects.create( name=\"Data Structures\", code=\"CS210\" ) self.course2 =", "1}, {'number': '073008880', 'type': 'Home', 'student': 1} ] } ) def test_put_on_writable_nested_foreignkey_related_field(self): url", "\"Data Structures\", \"code\": \"CS410\", \"books\": { \"remove\": [1] } } response = self.client.put(url,", "[] } ) def test_post_with_add_operation(self): url = reverse(\"rcourse-list\") data = { \"name\": \"Data", "data = { \"name\": \"Data Structures\", \"code\": \"CS310\", \"books\": {\"create\": [ {\"title\": \"Linear", "} } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, { 'name': 'yezy', 'age':", "} ) def test_put_with_create_operation(self): url = reverse(\"wcourse-detail\", args=[self.course2.id]) data = { \"name\": \"Data", "{'number': '076750000', 'type': 'office'} ] } } response = self.client.put(url, data, format=\"json\") self.assertEqual(", "'S.Mobit'} ] } ) def test_post_with_create_operation(self): data = { \"name\": \"Data Structures\", \"code\":", "\"name\": \"yezy\", \"age\": 33, \"course\": {\"name\": \"Programming\", \"code\": \"CS50\"}, } response = self.client.post(url,", "'type': 'office'} ] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data, {", "\"books\": { \"remove\": [1] } } response = self.client.put(url, data, format=\"json\") self.assertEqual( response.data,", "{ \"name\": \"Programming\", \"code\": \"CS50\", \"books\": { \"remove\": [1] } } } response" ]
[ "now() - {range} GROUP BY time({group}) fill(previous)\"\"\".format( range=range, group=group ) rs = influx_client.query(q)", "'ma2' in r: current['ma2'] = r['ma2'] # logger.info(current) if current['time'] and current['price'] and", "'timestamp': current['time'], 'diff': diff, 'ma1': current['ma1'], 'ma2': current['ma2'], } }]) trend = Analyser.checkTrend()", "'currency': 'MA2' }, 'fields': { 'timestamp': current['time'], 'diff': diff, 'ma1': current['ma1'], 'ma2': current['ma2'],", "Storage from .base import * # noqa TrendResult = namedtuple('Trend', ['trend', 'current']) class", "return trend @staticmethod def analyse(data): logger.setLevel(logging.INFO) # logger.debug('Analysing...') range = settings.BOT_DATA_SAMPLE_RANGE # 3h", "int(10): when the trending is up and a sell action is required \"\"\"", "in r: current['price'] = r['price'] current['time'] = r['time'] q = \"\"\"SELECT moving_average(mean(\"price\"), {ma1})", "{ 'trend': trend } }]) return trend @staticmethod def analyse(data): logger.setLevel(logging.INFO) # logger.debug('Analysing...')", "\"BTC_EUR\" WHERE time > now() - {range} GROUP BY time({group}) fill(linear)\"\"\".format( ma1=ma1, range=range,", "influx_client = Storage.get_client() pair = data['measurement'] # tweet = None # position =", "diff) Storage.store([{ 'measurement': 'MA1_MA2_DIFF', 'tags': { 'asset': 'MA1', 'currency': 'MA2' }, 'fields': {", "{range} GROUP BY time({group}) fill(linear)\"\"\".format( ma2=ma2, range=range, group=group ) rs = influx_client.query(q) r", "Check the last 2 records from the last 30m grouped by 1m Returns:", "the trending is down and a sell action is required int(-1): when the", "diff, 'ma1': current['ma1'], 'ma2': current['ma2'], } }]) trend = Analyser.checkTrend() logger.info(trend) return TrendResult(trend,", "state = 'up' elif d2 < d1: # shrinking if d2 <= 0", "logger.info(current) if current['time'] and current['price'] and current['ma1'] and current['ma2']: # diff diff =", ".base import * # noqa 
TrendResult = namedtuple('Trend', ['trend', 'current']) class Analyser(object): @staticmethod", "<= 0 and d2 > 0: trend = 10 # buy action state", "current['ma1'] - current['ma2'] # logger.info('%s MAs diff: %s', pair, diff) Storage.store([{ 'measurement': 'MA1_MA2_DIFF',", "> 0: trend = 10 # buy action state = 'buy' else: trend", "if 'ma2' in r: current['ma2'] = r['ma2'] # logger.info(current) if current['time'] and current['price']", "# 10 ma2 = settings.BOT_DATA_SAMPLE_MA2 # 20 influx_client = Storage.get_client() pair = data['measurement']", "> d1: # up trend if d1 <= 0 and d2 > 0:", "sell action is required int(-1): when the trending is down int(0): when in", "time(1m) fill(previous)\"\"\" rs = influx_client.query(q) if len(list(rs.get_points(measurement='MA1_MA2_DIFF'))) < 2: return 0 # no", "or no enough data int(1): when the trending is up int(10): when the", "moving_average(mean(\"price\"), {ma2}) as ma2 FROM \"BTC_EUR\" WHERE time > now() - {range} GROUP", "WHERE time > now() - 30m GROUP BY time(1m) fill(previous)\"\"\" rs = influx_client.query(q)", "None, 'ma1': None, 'ma2': None, } # # TODO: Replace 3 queries by", "diff: %s', pair, diff) Storage.store([{ 'measurement': 'MA1_MA2_DIFF', 'tags': { 'asset': 'MA1', 'currency': 'MA2'", "sell action state = 'sell' else: trend = -1 state = 'down' Storage.store([{", "= r['price'] current['time'] = r['time'] q = \"\"\"SELECT moving_average(mean(\"price\"), {ma1}) as ma1 FROM", "WHERE time > now() - {range} GROUP BY time({group}) fill(linear)\"\"\".format( ma1=ma1, range=range, group=group", "r: current['price'] = r['price'] current['time'] = r['time'] q = \"\"\"SELECT moving_average(mean(\"price\"), {ma1}) as", "FROM \"BTC_EUR\" WHERE time > now() - {range} GROUP BY time({group}) fill(linear)\"\"\".format( ma1=ma1,", "current['ma2']: # diff diff = current['ma1'] - current['ma2'] # logger.info('%s MAs diff: %s',", "current['ma1'] = r['ma1'] q = \"\"\"SELECT moving_average(mean(\"price\"), {ma2}) as ma2 FROM 
\"BTC_EUR\" WHERE", "current['ma2'] = r['ma2'] if 'ma2' in r: current['ma2'] = r['ma2'] # logger.info(current) if", "is up int(10): when the trending is up and a sell action is", "from .base import * # noqa TrendResult = namedtuple('Trend', ['trend', 'current']) class Analyser(object):", "'up' elif d2 < d1: # shrinking if d2 <= 0 and d1", "d2: d1 = d1['diff'] d2 = d2['diff'] if d2 > d1: # up", "Analyser(object): @staticmethod def checkTrend(): \"\"\" Check the last 2 records from the last", "as ma2 FROM \"BTC_EUR\" WHERE time > now() - {range} GROUP BY time({group})", "current['ma2'] = r['ma2'] # logger.info(current) if current['time'] and current['price'] and current['ma1'] and current['ma2']:", "# no enough data d1 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-2] d2 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-1] if 'diff' in", "'ma1': None, 'ma2': None, } # # TODO: Replace 3 queries by 1", "trend or no enough data int(1): when the trending is up int(10): when", "if d2 <= 0 and d1 > 0: trend = -10 # sell", "3h group = settings.BOT_DATA_SAMPLE_GROUP # 1m ma1 = settings.BOT_DATA_SAMPLE_MA1 # 10 ma2 =", "10 ma2 = settings.BOT_DATA_SAMPLE_MA2 # 20 influx_client = Storage.get_client() pair = data['measurement'] #", "influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] current['ma2'] = r['ma2'] if 'ma2' in r: current['ma2'] =", "time({group}) fill(linear)\"\"\".format( ma2=ma2, range=range, group=group ) rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] current['ma2']", "= 1 state = 'up' elif d2 < d1: # shrinking if d2", "logger.info('%s MAs diff: %s', pair, diff) Storage.store([{ 'measurement': 'MA1_MA2_DIFF', 'tags': { 'asset': 'MA1',", "pair = data['measurement'] # tweet = None # position = '' current =", "ma1 FROM \"BTC_EUR\" WHERE time > now() - {range} GROUP BY time({group}) fill(linear)\"\"\".format(", "MAs diff: %s', pair, diff) Storage.store([{ 'measurement': 'MA1_MA2_DIFF', 'tags': { 'asset': 'MA1', 
'currency':", "import * # noqa TrendResult = namedtuple('Trend', ['trend', 'current']) class Analyser(object): @staticmethod def", "'No trend' influx_client = Storage.get_client() q = \"\"\"SELECT mean(\"diff\") as diff FROM \"MA1_MA2_DIFF\"", "r['time'] q = \"\"\"SELECT moving_average(mean(\"price\"), {ma1}) as ma1 FROM \"BTC_EUR\" WHERE time >", "'down' Storage.store([{ 'measurement': 'TREND', 'tags': { 'state': state, }, 'fields': { 'trend': trend", "range=range, group=group ) rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] current['ma2'] = r['ma2'] if", "'tags': { 'asset': 'MA1', 'currency': 'MA2' }, 'fields': { 'timestamp': current['time'], 'diff': diff,", "group=group ) rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] if 'price' in r: current['price']", "= list(rs.get_points(measurement=pair))[-1] current['ma1'] = r['ma1'] if 'ma1' in r: current['ma1'] = r['ma1'] q", "and a sell action is required \"\"\" trend = 0 state = 'No", "= 'No trend' influx_client = Storage.get_client() q = \"\"\"SELECT mean(\"diff\") as diff FROM", "'ma1': current['ma1'], 'ma2': current['ma2'], } }]) trend = Analyser.checkTrend() logger.info(trend) return TrendResult(trend, current)", "< 2: return 0 # no enough data d1 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-2] d2 =", "list(rs.get_points(measurement=pair))[-1] if 'price' in r: current['price'] = r['price'] current['time'] = r['time'] q =", "time > now() - {range} GROUP BY time({group}) fill(linear)\"\"\".format( ma1=ma1, range=range, group=group )", "rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] current['ma1'] = r['ma1'] if 'ma1' in r:", "import settings from .storage import Storage from .base import * # noqa TrendResult", "influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] if 'price' in r: current['price'] = r['price'] current['time'] =", "sell action is required \"\"\" trend = 0 state = 'No trend' influx_client", "# sell 
action state = 'sell' else: trend = -1 state = 'down'", "d1['diff'] d2 = d2['diff'] if d2 > d1: # up trend if d1", "'ma2': None, } # # TODO: Replace 3 queries by 1 # q", "1 state = 'up' elif d2 < d1: # shrinking if d2 <=", "mean(\"diff\") as diff FROM \"MA1_MA2_DIFF\" WHERE time > now() - 30m GROUP BY", "TODO: Replace 3 queries by 1 # q = \"\"\"SELECT mean(\"price\") as price", "no enough data int(1): when the trending is up int(10): when the trending", "= influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] if 'price' in r: current['price'] = r['price'] current['time']", "d2 > 0: trend = 10 # buy action state = 'buy' else:", "diff diff = current['ma1'] - current['ma2'] # logger.info('%s MAs diff: %s', pair, diff)", "r['ma1'] q = \"\"\"SELECT moving_average(mean(\"price\"), {ma2}) as ma2 FROM \"BTC_EUR\" WHERE time >", "and d1 > 0: trend = -10 # sell action state = 'sell'", "2: return 0 # no enough data d1 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-2] d2 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-1]", "= None # position = '' current = { 'time': None, 'price': None,", "'diff' in d1 and 'diff' in d2: d1 = d1['diff'] d2 = d2['diff']", "- {range} GROUP BY time({group}) fill(linear)\"\"\".format( ma1=ma1, range=range, group=group ) rs = influx_client.query(q)", "'buy' else: trend = 1 state = 'up' elif d2 < d1: #", "ma2=ma2, range=range, group=group ) rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] current['ma2'] = r['ma2']", "import namedtuple from django.conf import settings from .storage import Storage from .base import", "namedtuple from django.conf import settings from .storage import Storage from .base import *", "ma2 = settings.BOT_DATA_SAMPLE_MA2 # 20 influx_client = Storage.get_client() pair = data['measurement'] # tweet", "'fields': { 'timestamp': current['time'], 'diff': diff, 'ma1': current['ma1'], 'ma2': current['ma2'], } }]) trend", "- current['ma2'] # logger.info('%s MAs diff: %s', pair, diff) 
Storage.store([{ 'measurement': 'MA1_MA2_DIFF', 'tags':", "trend @staticmethod def analyse(data): logger.setLevel(logging.INFO) # logger.debug('Analysing...') range = settings.BOT_DATA_SAMPLE_RANGE # 3h group", "from collections import namedtuple from django.conf import settings from .storage import Storage from", "-1 state = 'down' Storage.store([{ 'measurement': 'TREND', 'tags': { 'state': state, }, 'fields':", "\"\"\"SELECT moving_average(mean(\"price\"), {ma1}) as ma1 FROM \"BTC_EUR\" WHERE time > now() - {range}", "TrendResult = namedtuple('Trend', ['trend', 'current']) class Analyser(object): @staticmethod def checkTrend(): \"\"\" Check the", "settings from .storage import Storage from .base import * # noqa TrendResult =", "# 20 influx_client = Storage.get_client() pair = data['measurement'] # tweet = None #", "BY time({group}) fill(linear)\"\"\".format( ma1=ma1, range=range, group=group ) rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1]", "in r: current['ma2'] = r['ma2'] # logger.info(current) if current['time'] and current['price'] and current['ma1']", "when the trending is down int(0): when in no trend or no enough", "'ma1' in r: current['ma1'] = r['ma1'] q = \"\"\"SELECT moving_average(mean(\"price\"), {ma2}) as ma2", "= r['ma2'] if 'ma2' in r: current['ma2'] = r['ma2'] # logger.info(current) if current['time']", "no enough data d1 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-2] d2 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-1] if 'diff' in d1", "trend' influx_client = Storage.get_client() q = \"\"\"SELECT mean(\"diff\") as diff FROM \"MA1_MA2_DIFF\" WHERE", "by 1 # q = \"\"\"SELECT mean(\"price\") as price FROM \"BTC_EUR\" WHERE time", "} }]) return trend @staticmethod def analyse(data): logger.setLevel(logging.INFO) # logger.debug('Analysing...') range = settings.BOT_DATA_SAMPLE_RANGE", "= { 'time': None, 'price': None, 'ma1': None, 'ma2': None, } # #", "trending is up int(10): when the trending is up and a sell 
action", "current['price'] and current['ma1'] and current['ma2']: # diff diff = current['ma1'] - current['ma2'] #", "> now() - 30m GROUP BY time(1m) fill(previous)\"\"\" rs = influx_client.query(q) if len(list(rs.get_points(measurement='MA1_MA2_DIFF')))", "'trend': trend } }]) return trend @staticmethod def analyse(data): logger.setLevel(logging.INFO) # logger.debug('Analysing...') range", "{range} GROUP BY time({group}) fill(linear)\"\"\".format( ma1=ma1, range=range, group=group ) rs = influx_client.query(q) r", "{ 'asset': 'MA1', 'currency': 'MA2' }, 'fields': { 'timestamp': current['time'], 'diff': diff, 'ma1':", "d1 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-2] d2 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-1] if 'diff' in d1 and 'diff' in", "required int(-1): when the trending is down int(0): when in no trend or", "d1 = d1['diff'] d2 = d2['diff'] if d2 > d1: # up trend", "WHERE time > now() - {range} GROUP BY time({group}) fill(linear)\"\"\".format( ma2=ma2, range=range, group=group", "django.conf import settings from .storage import Storage from .base import * # noqa", "BY time({group}) fill(previous)\"\"\".format( range=range, group=group ) rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] if", "state = 'buy' else: trend = 1 state = 'up' elif d2 <", "= r['ma2'] # logger.info(current) if current['time'] and current['price'] and current['ma1'] and current['ma2']: #", "30m grouped by 1m Returns: int(-10): when the trending is down and a", "trending is down and a sell action is required int(-1): when the trending", "ma2 FROM \"BTC_EUR\" WHERE time > now() - {range} GROUP BY time({group}) fill(linear)\"\"\".format(", "list(rs.get_points(measurement=pair))[-1] current['ma1'] = r['ma1'] if 'ma1' in r: current['ma1'] = r['ma1'] q =", "q = \"\"\"SELECT mean(\"diff\") as diff FROM \"MA1_MA2_DIFF\" WHERE time > now() -", "enough data int(1): when the trending is up int(10): when the trending is", "r = 
list(rs.get_points(measurement=pair))[-1] if 'price' in r: current['price'] = r['price'] current['time'] = r['time']", "is down int(0): when in no trend or no enough data int(1): when", "r: current['ma2'] = r['ma2'] # logger.info(current) if current['time'] and current['price'] and current['ma1'] and", "<filename>src/trader/analyser.py from collections import namedtuple from django.conf import settings from .storage import Storage", "from .storage import Storage from .base import * # noqa TrendResult = namedtuple('Trend',", "GROUP BY time({group}) fill(linear)\"\"\".format( ma2=ma2, range=range, group=group ) rs = influx_client.query(q) r =", "diff FROM \"MA1_MA2_DIFF\" WHERE time > now() - 30m GROUP BY time(1m) fill(previous)\"\"\"", "'asset': 'MA1', 'currency': 'MA2' }, 'fields': { 'timestamp': current['time'], 'diff': diff, 'ma1': current['ma1'],", "fill(previous)\"\"\".format( range=range, group=group ) rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] if 'price' in", "# up trend if d1 <= 0 and d2 > 0: trend =", "\"\"\" Check the last 2 records from the last 30m grouped by 1m", "= 'sell' else: trend = -1 state = 'down' Storage.store([{ 'measurement': 'TREND', 'tags':", "current = { 'time': None, 'price': None, 'ma1': None, 'ma2': None, } #", "1 # q = \"\"\"SELECT mean(\"price\") as price FROM \"BTC_EUR\" WHERE time >", "diff = current['ma1'] - current['ma2'] # logger.info('%s MAs diff: %s', pair, diff) Storage.store([{", "'measurement': 'MA1_MA2_DIFF', 'tags': { 'asset': 'MA1', 'currency': 'MA2' }, 'fields': { 'timestamp': current['time'],", "'time': None, 'price': None, 'ma1': None, 'ma2': None, } # # TODO: Replace", "ma1 = settings.BOT_DATA_SAMPLE_MA1 # 10 ma2 = settings.BOT_DATA_SAMPLE_MA2 # 20 influx_client = Storage.get_client()", "trend = 10 # buy action state = 'buy' else: trend = 1", "10 # buy action state = 'buy' else: trend = 1 state =", "# logger.debug('Analysing...') range = settings.BOT_DATA_SAMPLE_RANGE # 3h group = 
settings.BOT_DATA_SAMPLE_GROUP # 1m ma1", "influx_client.query(q) if len(list(rs.get_points(measurement='MA1_MA2_DIFF'))) < 2: return 0 # no enough data d1 =", "= data['measurement'] # tweet = None # position = '' current = {", "current['time'], 'diff': diff, 'ma1': current['ma1'], 'ma2': current['ma2'], } }]) trend = Analyser.checkTrend() logger.info(trend)", "\"\"\"SELECT mean(\"price\") as price FROM \"BTC_EUR\" WHERE time > now() - {range} GROUP", "in d2: d1 = d1['diff'] d2 = d2['diff'] if d2 > d1: #", "# shrinking if d2 <= 0 and d1 > 0: trend = -10", "as ma1 FROM \"BTC_EUR\" WHERE time > now() - {range} GROUP BY time({group})", "class Analyser(object): @staticmethod def checkTrend(): \"\"\" Check the last 2 records from the", "GROUP BY time({group}) fill(linear)\"\"\".format( ma1=ma1, range=range, group=group ) rs = influx_client.query(q) r =", "group=group ) rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] current['ma1'] = r['ma1'] if 'ma1'", "{range} GROUP BY time({group}) fill(previous)\"\"\".format( range=range, group=group ) rs = influx_client.query(q) r =", "= r['ma1'] if 'ma1' in r: current['ma1'] = r['ma1'] q = \"\"\"SELECT moving_average(mean(\"price\"),", "analyse(data): logger.setLevel(logging.INFO) # logger.debug('Analysing...') range = settings.BOT_DATA_SAMPLE_RANGE # 3h group = settings.BOT_DATA_SAMPLE_GROUP #", "# logger.info('%s MAs diff: %s', pair, diff) Storage.store([{ 'measurement': 'MA1_MA2_DIFF', 'tags': { 'asset':", "= influx_client.query(q) if len(list(rs.get_points(measurement='MA1_MA2_DIFF'))) < 2: return 0 # no enough data d1", "= 0 state = 'No trend' influx_client = Storage.get_client() q = \"\"\"SELECT mean(\"diff\")", "= list(rs.get_points(measurement='MA1_MA2_DIFF'))[-2] d2 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-1] if 'diff' in d1 and 'diff' in d2:", "d2 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-1] if 'diff' in d1 and 'diff' in d2: d1 =", "rs = influx_client.query(q) if 
len(list(rs.get_points(measurement='MA1_MA2_DIFF'))) < 2: return 0 # no enough data", "\"BTC_EUR\" WHERE time > now() - {range} GROUP BY time({group}) fill(linear)\"\"\".format( ma2=ma2, range=range,", "# noqa TrendResult = namedtuple('Trend', ['trend', 'current']) class Analyser(object): @staticmethod def checkTrend(): \"\"\"", "else: trend = 1 state = 'up' elif d2 < d1: # shrinking", "from the last 30m grouped by 1m Returns: int(-10): when the trending is", "q = \"\"\"SELECT mean(\"price\") as price FROM \"BTC_EUR\" WHERE time > now() -", "time > now() - 30m GROUP BY time(1m) fill(previous)\"\"\" rs = influx_client.query(q) if", "time({group}) fill(previous)\"\"\".format( range=range, group=group ) rs = influx_client.query(q) r = list(rs.get_points(measurement=pair))[-1] if 'price'", "int(1): when the trending is up int(10): when the trending is up and", "0 and d2 > 0: trend = 10 # buy action state =", "0: trend = 10 # buy action state = 'buy' else: trend =", "when in no trend or no enough data int(1): when the trending is", "'fields': { 'trend': trend } }]) return trend @staticmethod def analyse(data): logger.setLevel(logging.INFO) #", "trend = 0 state = 'No trend' influx_client = Storage.get_client() q = \"\"\"SELECT", "the trending is up int(10): when the trending is up and a sell", "}, 'fields': { 'trend': trend } }]) return trend @staticmethod def analyse(data): logger.setLevel(logging.INFO)", "tweet = None # position = '' current = { 'time': None, 'price':", "# buy action state = 'buy' else: trend = 1 state = 'up'", "as price FROM \"BTC_EUR\" WHERE time > now() - {range} GROUP BY time({group})", "import Storage from .base import * # noqa TrendResult = namedtuple('Trend', ['trend', 'current'])", "d2['diff'] if d2 > d1: # up trend if d1 <= 0 and", "down int(0): when in no trend or no enough data int(1): when the", "list(rs.get_points(measurement=pair))[-1] current['ma2'] = r['ma2'] if 'ma2' in r: current['ma2'] = r['ma2'] # logger.info(current)", "'current']) 
from collections import namedtuple

from django.conf import settings

from .storage import Storage
from .base import *  # noqa

# Pairs the integer trend signal with the sampled data point it was
# computed from.
TrendResult = namedtuple('Trend', ['trend', 'current'])


class Analyser(object):
    """Moving-average crossover analyser backed by InfluxDB.

    Reads price samples via Storage's InfluxDB client, stores the
    MA1-MA2 difference back into InfluxDB, and derives a buy/sell/hold
    trend signal from the last two difference samples.
    """

    @staticmethod
    def checkTrend():
        """
        Check the last 2 records from the last 30m grouped by 1m.

        Returns:
            int(-10): when the trending is down and a sell action is required
            int(-1): when the trending is down
            int(0): when in no trend or no enough data
            int(1): when the trending is up
            int(10): when the trending is up and a buy action is required
        """
        trend = 0
        state = 'No trend'
        influx_client = Storage.get_client()
        q = """SELECT mean("diff") as diff FROM "MA1_MA2_DIFF" WHERE time > now() - 30m GROUP BY time(1m) fill(previous)"""
        rs = influx_client.query(q)
        # Materialise the result set once instead of re-running
        # get_points() for every access (it was built three times before).
        points = list(rs.get_points(measurement='MA1_MA2_DIFF'))
        if len(points) < 2:
            return 0  # no enough data
        d1 = points[-2]
        d2 = points[-1]
        if 'diff' in d1 and 'diff' in d2:
            d1 = d1['diff']
            d2 = d2['diff']
            if d2 > d1:
                # up trend; crossing zero from below is the buy signal
                if d1 <= 0 and d2 > 0:
                    trend = 10  # buy action
                    state = 'buy'
                else:
                    trend = 1
                    state = 'up'
            elif d2 < d1:
                # shrinking; crossing zero from above is the sell signal
                if d2 <= 0 and d1 > 0:
                    trend = -10  # sell action
                    state = 'sell'
                else:
                    trend = -1
                    state = 'down'
        Storage.store([{
            'measurement': 'TREND',
            'tags': {
                'state': state,
            },
            'fields': {
                'trend': trend
            }
        }])
        return trend

    @staticmethod
    def analyse(data):
        """Sample price and moving averages for data['measurement'].

        Queries InfluxDB for the mean price and two moving averages over
        the configured sample range, stores their difference into the
        MA1_MA2_DIFF measurement, and returns a TrendResult combining
        checkTrend()'s signal with the sampled values.  Returns None when
        any of price/ma1/ma2 could not be sampled.

        @param data: dict with at least a 'measurement' key (the pair name)
        """
        logger.setLevel(logging.INFO)
        # logger.debug('Analysing...')
        # NOTE: `range` renamed to avoid shadowing the builtin.
        sample_range = settings.BOT_DATA_SAMPLE_RANGE  # 3h
        group = settings.BOT_DATA_SAMPLE_GROUP  # 1m
        ma1 = settings.BOT_DATA_SAMPLE_MA1  # 10
        ma2 = settings.BOT_DATA_SAMPLE_MA2  # 20
        influx_client = Storage.get_client()
        pair = data['measurement']
        # tweet = None
        # position = ''
        current = {
            'time': None,
            'price': None,
            'ma1': None,
            'ma2': None,
        }
        #
        # TODO: Replace 3 queries by 1
        # TODO(review): the queries hardcode "BTC_EUR" while get_points()
        # filters on *pair* -- confirm pair is always BTC_EUR.
        q = """SELECT mean("price") as price FROM "BTC_EUR" WHERE time > now() - {range} GROUP BY time({group}) fill(previous)""".format(
            range=sample_range, group=group
        )
        rs = influx_client.query(q)
        r = list(rs.get_points(measurement=pair))[-1]
        if 'price' in r:
            current['price'] = r['price']
            current['time'] = r['time']
        q = """SELECT moving_average(mean("price"), {ma1}) as ma1 FROM "BTC_EUR" WHERE time > now() - {range} GROUP BY time({group}) fill(linear)""".format(
            ma1=ma1, range=sample_range, group=group
        )
        rs = influx_client.query(q)
        r = list(rs.get_points(measurement=pair))[-1]
        if 'ma1' in r:
            current['ma1'] = r['ma1']
        q = """SELECT moving_average(mean("price"), {ma2}) as ma2 FROM "BTC_EUR" WHERE time > now() - {range} GROUP BY time({group}) fill(linear)""".format(
            ma2=ma2, range=sample_range, group=group
        )
        rs = influx_client.query(q)
        r = list(rs.get_points(measurement=pair))[-1]
        if 'ma2' in r:
            current['ma2'] = r['ma2']
        # logger.info(current)
        if current['time'] and current['price'] and current['ma1'] and current['ma2']:
            # diff between fast and slow moving average
            diff = current['ma1'] - current['ma2']
            # logger.info('%s MAs diff: %s', pair, diff)
            Storage.store([{
                'measurement': 'MA1_MA2_DIFF',
                'tags': {
                    'asset': 'MA1',
                    'currency': 'MA2'
                },
                'fields': {
                    'timestamp': current['time'],
                    'diff': diff,
                    'ma1': current['ma1'],
                    'ma2': current['ma2'],
                }
            }])
            trend = Analyser.checkTrend()
            logger.info(trend)
            return TrendResult(trend, current)
[ "docs & images. \"\"\" import os import collections import bruhat.render.doc from bruhat.render.front import", "\"line\", end) raise except StopIteration: break def harvest(path, name, dummy=False): print(\"run_tests.harvest\", name) assert", "bruhat.render.front import Canvas, Scale, Base from bruhat.render.box import Box class TestRun(Base): def __init__(self,", "%r\"%name all_names.add(name) svgname = \"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1 # index", "None: cvs = Canvas() cvs.append(Scale(2.0)) box.render(cvs) else: cvs = Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname)", "bruhat.render.box import Box class TestRun(Base): def __init__(self, func, start=None, end=None, img=None, result=None): self.func", "result=items) return start = items.gi_frame.f_lineno # index while 1: try: box = None", "result elif isinstance(result, Canvas): cvs = result else: assert 0, \"%r not understood\"", "isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key = lambda f : (f.__module__, f.__code__.co_firstlineno)) for func in", "collections.Callable): funcs.append(value) funcs.sort(key = lambda f : (f.__module__, f.__code__.co_firstlineno)) for func in funcs:", "in run_test(func, dummy=dummy): yield test def run(): path = os.path.dirname(__file__) names = os.listdir(path)", "stem = name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem __import__(desc) m = getattr(bruhat.render.doc, stem) funcs =", "and name.startswith(\"test_\")] names.sort() for name in names: for test in harvest(path, name, True):", "funcs: for test in run_test(func, dummy=dummy): yield test def run(): path = os.path.dirname(__file__)", "self.start = start self.end = end self.img = img self.result = result all_names", "= getattr(m, attr) if attr.startswith(\"test_\") and isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key = lambda f", "start=None, end=None, img=None, result=None): self.func 
= func self.start = start self.end = end", "result = items.__next__() if isinstance(result, tuple): result, name = result if isinstance(result, Box):", "for func in funcs: for test in run_test(func, dummy=dummy): yield test def run():", "getattr(bruhat.render.doc, stem) funcs = [] for attr in dir(m): value = getattr(m, attr)", "collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno, result=items) return start = items.gi_frame.f_lineno # index while 1:", "\"bruhat.render.doc.\"+stem __import__(desc) m = getattr(bruhat.render.doc, stem) funcs = [] for attr in dir(m):", "names = os.listdir(path) names = [name for name in names if name.endswith(\".py\") and", "box.render(cvs) else: cvs = Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except: print(\"run_tests: render failed", "\"/dev/null\" pdfname = \"/dev/null\" try: print(\"run_tests: rendering\", name, func) if cvs is None:", "for test in harvest(path, name, True): yield test def main(): for test in", "collections import bruhat.render.doc from bruhat.render.front import Canvas, Scale, Base from bruhat.render.box import Box", "funcs.sort(key = lambda f : (f.__module__, f.__code__.co_firstlineno)) for func in funcs: for test", "+= 1 assert name not in all_names, \"name dup: %r\"%name all_names.add(name) svgname =", "= \"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1 # index test = TestRun(func,", "name.endswith(\".py\") stem = name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem __import__(desc) m = getattr(bruhat.render.doc, stem) funcs", "start self.end = end self.img = img self.result = result all_names = set()", "assert 0, \"%r not understood\" % (result,) if not name: name = \"output-%d\"%counter", "StopIteration: break def harvest(path, name, dummy=False): print(\"run_tests.harvest\", name) assert name.endswith(\".py\") stem = name[:-len(\".py\")]", "run(): path = os.path.dirname(__file__) names = os.listdir(path) 
names = [name for name in", "self.func = func self.start = start self.end = end self.img = img self.result", "func in funcs: for test in run_test(func, dummy=dummy): yield test def run(): path", "path = os.path.dirname(__file__) names = os.listdir(path) names = [name for name in names", "= None result = items.__next__() if isinstance(result, tuple): result, name = result if", "in all_names, \"name dup: %r\"%name all_names.add(name) svgname = \"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name end", "= getattr(bruhat.render.doc, stem) funcs = [] for attr in dir(m): value = getattr(m,", "cvs = Canvas() cvs.append(Scale(2.0)) box.render(cvs) else: cvs = Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print()", "if isinstance(result, tuple): result, name = result if isinstance(result, Box): box = result", "funcs = [] for attr in dir(m): value = getattr(m, attr) if attr.startswith(\"test_\")", "1: try: box = None cvs = None name = None result =", "0 def run_test(func, dummy=False): global counter items = func() if not isinstance(items, collections.Iterator):", "start = end+1 if dummy: svgname = \"/dev/null\" pdfname = \"/dev/null\" try: print(\"run_tests:", "= \"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1 # index test = TestRun(func, start, end, svgname)", "dir(m): value = getattr(m, attr) if attr.startswith(\"test_\") and isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key =", "bruhat.render.doc from bruhat.render.front import Canvas, Scale, Base from bruhat.render.box import Box class TestRun(Base):", "to rebuild all docs & images. 
\"\"\" import os import collections import bruhat.render.doc", "Scale, Base from bruhat.render.box import Box class TestRun(Base): def __init__(self, func, start=None, end=None,", "desc = \"bruhat.render.doc.\"+stem __import__(desc) m = getattr(bruhat.render.doc, stem) funcs = [] for attr", "getattr(m, attr) if attr.startswith(\"test_\") and isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key = lambda f :", "test def main(): for test in run(): pass print(\"run_tests.main: finished\") if __name__ ==", "self.end = end self.img = img self.result = result all_names = set() counter", "global counter items = func() if not isinstance(items, collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno, result=items)", "result if isinstance(result, Box): box = result elif isinstance(result, Canvas): cvs = result", "func.__name__, \"line\", end) raise except StopIteration: break def harvest(path, name, dummy=False): print(\"run_tests.harvest\", name)", "Canvas): cvs = result else: assert 0, \"%r not understood\" % (result,) if", "cvs = result else: assert 0, \"%r not understood\" % (result,) if not", "= result else: assert 0, \"%r not understood\" % (result,) if not name:", "print(\"run_tests.harvest\", name) assert name.endswith(\".py\") stem = name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem __import__(desc) m =", "funcs.append(value) funcs.sort(key = lambda f : (f.__module__, f.__code__.co_firstlineno)) for func in funcs: for", "except: print(\"run_tests: render failed for\", name, func.__name__, \"line\", end) raise except StopIteration: break", "name.startswith(\"test_\")] names.sort() for name in names: for test in harvest(path, name, True): yield", "try: print(\"run_tests: rendering\", name, func) if cvs is None: cvs = Canvas() cvs.append(Scale(2.0))", "use mkdoc.py to rebuild all docs & images. 
\"\"\" import os import collections", "self.img = img self.result = result all_names = set() counter = 0 def", "result all_names = set() counter = 0 def run_test(func, dummy=False): global counter items", "= \"bruhat.render.doc.\"+stem __import__(desc) m = getattr(bruhat.render.doc, stem) funcs = [] for attr in", "= None cvs = None name = None result = items.__next__() if isinstance(result,", "result, name = result if isinstance(result, Box): box = result elif isinstance(result, Canvas):", "not name: name = \"output-%d\"%counter counter += 1 assert name not in all_names,", "set() counter = 0 def run_test(func, dummy=False): global counter items = func() if", "\"/dev/null\" try: print(\"run_tests: rendering\", name, func) if cvs is None: cvs = Canvas()", "python3 \"\"\" Note: use mkdoc.py to rebuild all docs & images. \"\"\" import", "else: assert 0, \"%r not understood\" % (result,) if not name: name =", "f : (f.__module__, f.__code__.co_firstlineno)) for func in funcs: for test in run_test(func, dummy=dummy):", "def main(): for test in run(): pass print(\"run_tests.main: finished\") if __name__ == \"__main__\":", "func) if cvs is None: cvs = Canvas() cvs.append(Scale(2.0)) box.render(cvs) else: cvs =", ": (f.__module__, f.__code__.co_firstlineno)) for func in funcs: for test in run_test(func, dummy=dummy): yield", "(f.__module__, f.__code__.co_firstlineno)) for func in funcs: for test in run_test(func, dummy=dummy): yield test", "start = items.gi_frame.f_lineno # index while 1: try: box = None cvs =", "rebuild all docs & images. 
\"\"\" import os import collections import bruhat.render.doc from", "class TestRun(Base): def __init__(self, func, start=None, end=None, img=None, result=None): self.func = func self.start", "failed for\", name, func.__name__, \"line\", end) raise except StopIteration: break def harvest(path, name,", "= items.gi_frame.f_lineno-1 # index test = TestRun(func, start, end, svgname) yield test start", "from bruhat.render.front import Canvas, Scale, Base from bruhat.render.box import Box class TestRun(Base): def", "= \"/dev/null\" pdfname = \"/dev/null\" try: print(\"run_tests: rendering\", name, func) if cvs is", "cvs = Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except: print(\"run_tests: render failed for\", name,", "import Canvas, Scale, Base from bruhat.render.box import Box class TestRun(Base): def __init__(self, func,", "counter items = func() if not isinstance(items, collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno, result=items) return", "= TestRun(func, start, end, svgname) yield test start = end+1 if dummy: svgname", "for\", name, func.__name__, \"line\", end) raise except StopIteration: break def harvest(path, name, dummy=False):", "func() if not isinstance(items, collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno, result=items) return start = items.gi_frame.f_lineno", "cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except: print(\"run_tests: render failed for\", name, func.__name__, \"line\", end)", "run_test(func, dummy=False): global counter items = func() if not isinstance(items, collections.Iterator): yield TestRun(func,", "<reponame>punkdit/bruhat #!/usr/bin/env python3 \"\"\" Note: use mkdoc.py to rebuild all docs & images.", "img=None, result=None): self.func = func self.start = start self.end = end self.img =", "os.path.dirname(__file__) names = os.listdir(path) names = [name for name in names if name.endswith(\".py\")", "if cvs is None: cvs = 
Canvas() cvs.append(Scale(2.0)) box.render(cvs) else: cvs = Canvas([Scale(2.0),", "names.sort() for name in names: for test in harvest(path, name, True): yield test", "= lambda f : (f.__module__, f.__code__.co_firstlineno)) for func in funcs: for test in", "= 0 def run_test(func, dummy=False): global counter items = func() if not isinstance(items,", "Note: use mkdoc.py to rebuild all docs & images. \"\"\" import os import", "name: name = \"output-%d\"%counter counter += 1 assert name not in all_names, \"name", "svgname) yield test start = end+1 if dummy: svgname = \"/dev/null\" pdfname =", "print(\"run_tests: rendering\", name, func) if cvs is None: cvs = Canvas() cvs.append(Scale(2.0)) box.render(cvs)", "\"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1 # index test = TestRun(func, start, end, svgname) yield", "stem) funcs = [] for attr in dir(m): value = getattr(m, attr) if", "TestRun(func, func.__code__.co_firstlineno, result=items) return start = items.gi_frame.f_lineno # index while 1: try: box", "import collections import bruhat.render.doc from bruhat.render.front import Canvas, Scale, Base from bruhat.render.box import", "test start = end+1 if dummy: svgname = \"/dev/null\" pdfname = \"/dev/null\" try:", "[] for attr in dir(m): value = getattr(m, attr) if attr.startswith(\"test_\") and isinstance(value,", "\"\"\" Note: use mkdoc.py to rebuild all docs & images. 
\"\"\" import os", "= result elif isinstance(result, Canvas): cvs = result else: assert 0, \"%r not", "end self.img = img self.result = result all_names = set() counter = 0", "name, dummy=False): print(\"run_tests.harvest\", name) assert name.endswith(\".py\") stem = name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem __import__(desc)", "value = getattr(m, attr) if attr.startswith(\"test_\") and isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key = lambda", "yield test def main(): for test in run(): pass print(\"run_tests.main: finished\") if __name__", "all_names.add(name) svgname = \"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1 # index test", "\"name dup: %r\"%name all_names.add(name) svgname = \"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1", "1 assert name not in all_names, \"name dup: %r\"%name all_names.add(name) svgname = \"images/%s.svg\"%name", "\"\"\" import os import collections import bruhat.render.doc from bruhat.render.front import Canvas, Scale, Base", "all_names, \"name dup: %r\"%name all_names.add(name) svgname = \"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name end =", "result=None): self.func = func self.start = start self.end = end self.img = img", "box = None cvs = None name = None result = items.__next__() if", "import Box class TestRun(Base): def __init__(self, func, start=None, end=None, img=None, result=None): self.func =", "func.__code__.co_firstlineno, result=items) return start = items.gi_frame.f_lineno # index while 1: try: box =", "None result = items.__next__() if isinstance(result, tuple): result, name = result if isinstance(result,", "= set() counter = 0 def run_test(func, dummy=False): global counter items = func()", "= items.gi_frame.f_lineno # index while 1: try: box = None cvs = None", "test = TestRun(func, start, end, svgname) yield test start = end+1 if dummy:", "Canvas() cvs.append(Scale(2.0)) box.render(cvs) 
else: cvs = Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except: print(\"run_tests:", "end = items.gi_frame.f_lineno-1 # index test = TestRun(func, start, end, svgname) yield test", "if not isinstance(items, collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno, result=items) return start = items.gi_frame.f_lineno #", "isinstance(result, tuple): result, name = result if isinstance(result, Box): box = result elif", "test def run(): path = os.path.dirname(__file__) names = os.listdir(path) names = [name for", "(result,) if not name: name = \"output-%d\"%counter counter += 1 assert name not", "\"output-%d\"%counter counter += 1 assert name not in all_names, \"name dup: %r\"%name all_names.add(name)", "name in names: for test in harvest(path, name, True): yield test def main():", "svgname = \"/dev/null\" pdfname = \"/dev/null\" try: print(\"run_tests: rendering\", name, func) if cvs", "main(): for test in run(): pass print(\"run_tests.main: finished\") if __name__ == \"__main__\": main()", "import bruhat.render.doc from bruhat.render.front import Canvas, Scale, Base from bruhat.render.box import Box class", "assert name not in all_names, \"name dup: %r\"%name all_names.add(name) svgname = \"images/%s.svg\"%name pdfname", "func self.start = start self.end = end self.img = img self.result = result", "index while 1: try: box = None cvs = None name = None", "render failed for\", name, func.__name__, \"line\", end) raise except StopIteration: break def harvest(path,", "end+1 if dummy: svgname = \"/dev/null\" pdfname = \"/dev/null\" try: print(\"run_tests: rendering\", name,", "is None: cvs = Canvas() cvs.append(Scale(2.0)) box.render(cvs) else: cvs = Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname)", "yield test start = end+1 if dummy: svgname = \"/dev/null\" pdfname = \"/dev/null\"", "= start self.end = end self.img = img self.result = result all_names =", "= None name = None result = items.__next__() if 
isinstance(result, tuple): result, name", "cvs.append(Scale(2.0)) box.render(cvs) else: cvs = Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except: print(\"run_tests: render", "= name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem __import__(desc) m = getattr(bruhat.render.doc, stem) funcs = []", "attr.startswith(\"test_\") and isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key = lambda f : (f.__module__, f.__code__.co_firstlineno)) for", "lambda f : (f.__module__, f.__code__.co_firstlineno)) for func in funcs: for test in run_test(func,", "= result if isinstance(result, Box): box = result elif isinstance(result, Canvas): cvs =", "for name in names: for test in harvest(path, name, True): yield test def", "# index test = TestRun(func, start, end, svgname) yield test start = end+1", "Box class TestRun(Base): def __init__(self, func, start=None, end=None, img=None, result=None): self.func = func", "not understood\" % (result,) if not name: name = \"output-%d\"%counter counter += 1", "counter += 1 assert name not in all_names, \"name dup: %r\"%name all_names.add(name) svgname", "attr) if attr.startswith(\"test_\") and isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key = lambda f : (f.__module__,", "None cvs = None name = None result = items.__next__() if isinstance(result, tuple):", "dummy=dummy): yield test def run(): path = os.path.dirname(__file__) names = os.listdir(path) names =", "name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem __import__(desc) m = getattr(bruhat.render.doc, stem) funcs = [] for", "= img self.result = result all_names = set() counter = 0 def run_test(func,", "raise except StopIteration: break def harvest(path, name, dummy=False): print(\"run_tests.harvest\", name) assert name.endswith(\".py\") stem", "if dummy: svgname = \"/dev/null\" pdfname = \"/dev/null\" try: print(\"run_tests: rendering\", name, func)", "from bruhat.render.box import Box 
class TestRun(Base): def __init__(self, func, start=None, end=None, img=None, result=None):", "cvs is None: cvs = Canvas() cvs.append(Scale(2.0)) box.render(cvs) else: cvs = Canvas([Scale(2.0), cvs])", "name in names if name.endswith(\".py\") and name.startswith(\"test_\")] names.sort() for name in names: for", "name.endswith(\".py\") and name.startswith(\"test_\")] names.sort() for name in names: for test in harvest(path, name,", "name = \"output-%d\"%counter counter += 1 assert name not in all_names, \"name dup:", "in funcs: for test in run_test(func, dummy=dummy): yield test def run(): path =", "all docs & images. \"\"\" import os import collections import bruhat.render.doc from bruhat.render.front", "self.result = result all_names = set() counter = 0 def run_test(func, dummy=False): global", "not in all_names, \"name dup: %r\"%name all_names.add(name) svgname = \"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name", "os import collections import bruhat.render.doc from bruhat.render.front import Canvas, Scale, Base from bruhat.render.box", "def __init__(self, func, start=None, end=None, img=None, result=None): self.func = func self.start = start", "names = [name for name in names if name.endswith(\".py\") and name.startswith(\"test_\")] names.sort() for", "end) raise except StopIteration: break def harvest(path, name, dummy=False): print(\"run_tests.harvest\", name) assert name.endswith(\".py\")", "= func() if not isinstance(items, collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno, result=items) return start =", "isinstance(items, collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno, result=items) return start = items.gi_frame.f_lineno # index while", "items = func() if not isinstance(items, collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno, result=items) return start", "name, func.__name__, \"line\", end) raise except StopIteration: break def harvest(path, name, dummy=False): 
print(\"run_tests.harvest\",", "understood\" % (result,) if not name: name = \"output-%d\"%counter counter += 1 assert", "f.__code__.co_firstlineno)) for func in funcs: for test in run_test(func, dummy=dummy): yield test def", "& images. \"\"\" import os import collections import bruhat.render.doc from bruhat.render.front import Canvas,", "dummy=False): global counter items = func() if not isinstance(items, collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno,", "try: box = None cvs = None name = None result = items.__next__()", "% (result,) if not name: name = \"output-%d\"%counter counter += 1 assert name", "def harvest(path, name, dummy=False): print(\"run_tests.harvest\", name) assert name.endswith(\".py\") stem = name[:-len(\".py\")] desc =", "attr in dir(m): value = getattr(m, attr) if attr.startswith(\"test_\") and isinstance(value, collections.Callable): funcs.append(value)", "name = result if isinstance(result, Box): box = result elif isinstance(result, Canvas): cvs", "= \"/dev/null\" try: print(\"run_tests: rendering\", name, func) if cvs is None: cvs =", "[name for name in names if name.endswith(\".py\") and name.startswith(\"test_\")] names.sort() for name in", "if not name: name = \"output-%d\"%counter counter += 1 assert name not in", "TestRun(Base): def __init__(self, func, start=None, end=None, img=None, result=None): self.func = func self.start =", "print() except: print(\"run_tests: render failed for\", name, func.__name__, \"line\", end) raise except StopIteration:", "= end self.img = img self.result = result all_names = set() counter =", "isinstance(result, Box): box = result elif isinstance(result, Canvas): cvs = result else: assert", "box = result elif isinstance(result, Canvas): cvs = result else: assert 0, \"%r", "pdfname = \"/dev/null\" try: print(\"run_tests: rendering\", name, func) if cvs is None: cvs", "cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except: print(\"run_tests: render failed for\", name, 
func.__name__, \"line\", end) raise", "dummy=False): print(\"run_tests.harvest\", name) assert name.endswith(\".py\") stem = name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem __import__(desc) m", "= result all_names = set() counter = 0 def run_test(func, dummy=False): global counter", "img self.result = result all_names = set() counter = 0 def run_test(func, dummy=False):", "= [name for name in names if name.endswith(\".py\") and name.startswith(\"test_\")] names.sort() for name", "result else: assert 0, \"%r not understood\" % (result,) if not name: name", "Canvas, Scale, Base from bruhat.render.box import Box class TestRun(Base): def __init__(self, func, start=None,", "import os import collections import bruhat.render.doc from bruhat.render.front import Canvas, Scale, Base from", "= func self.start = start self.end = end self.img = img self.result =", "harvest(path, name, dummy=False): print(\"run_tests.harvest\", name) assert name.endswith(\".py\") stem = name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem", "return start = items.gi_frame.f_lineno # index while 1: try: box = None cvs", "= Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except: print(\"run_tests: render failed for\", name, func.__name__,", "TestRun(func, start, end, svgname) yield test start = end+1 if dummy: svgname =", "__import__(desc) m = getattr(bruhat.render.doc, stem) funcs = [] for attr in dir(m): value", "= os.path.dirname(__file__) names = os.listdir(path) names = [name for name in names if", "for name in names if name.endswith(\".py\") and name.startswith(\"test_\")] names.sort() for name in names:", "if name.endswith(\".py\") and name.startswith(\"test_\")] names.sort() for name in names: for test in harvest(path,", "rendering\", name, func) if cvs is None: cvs = Canvas() cvs.append(Scale(2.0)) box.render(cvs) else:", "if attr.startswith(\"test_\") and isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key = lambda f 
: (f.__module__, f.__code__.co_firstlineno))", "None name = None result = items.__next__() if isinstance(result, tuple): result, name =", "while 1: try: box = None cvs = None name = None result", "start, end, svgname) yield test start = end+1 if dummy: svgname = \"/dev/null\"", "dup: %r\"%name all_names.add(name) svgname = \"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1 #", "harvest(path, name, True): yield test def main(): for test in run(): pass print(\"run_tests.main:", "dummy: svgname = \"/dev/null\" pdfname = \"/dev/null\" try: print(\"run_tests: rendering\", name, func) if", "images. \"\"\" import os import collections import bruhat.render.doc from bruhat.render.front import Canvas, Scale,", "for test in run_test(func, dummy=dummy): yield test def run(): path = os.path.dirname(__file__) names", "cvs.writePDFfile(pdfname) print() except: print(\"run_tests: render failed for\", name, func.__name__, \"line\", end) raise except", "tuple): result, name = result if isinstance(result, Box): box = result elif isinstance(result,", "Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except: print(\"run_tests: render failed for\", name, func.__name__, \"line\",", "name, True): yield test def main(): for test in run(): pass print(\"run_tests.main: finished\")", "if isinstance(result, Box): box = result elif isinstance(result, Canvas): cvs = result else:", "= os.listdir(path) names = [name for name in names if name.endswith(\".py\") and name.startswith(\"test_\")]", "\"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1 # index test = TestRun(func, start,", "yield TestRun(func, func.__code__.co_firstlineno, result=items) return start = items.gi_frame.f_lineno # index while 1: try:", "name, func) if cvs is None: cvs = Canvas() cvs.append(Scale(2.0)) box.render(cvs) else: cvs", "except StopIteration: break def harvest(path, name, dummy=False): 
print(\"run_tests.harvest\", name) assert name.endswith(\".py\") stem =", "all_names = set() counter = 0 def run_test(func, dummy=False): global counter items =", "Box): box = result elif isinstance(result, Canvas): cvs = result else: assert 0,", "else: cvs = Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except: print(\"run_tests: render failed for\",", "in names: for test in harvest(path, name, True): yield test def main(): for", "elif isinstance(result, Canvas): cvs = result else: assert 0, \"%r not understood\" %", "test in harvest(path, name, True): yield test def main(): for test in run():", "= items.__next__() if isinstance(result, tuple): result, name = result if isinstance(result, Box): box", "for attr in dir(m): value = getattr(m, attr) if attr.startswith(\"test_\") and isinstance(value, collections.Callable):", "func, start=None, end=None, img=None, result=None): self.func = func self.start = start self.end =", "names if name.endswith(\".py\") and name.startswith(\"test_\")] names.sort() for name in names: for test in", "mkdoc.py to rebuild all docs & images. 
\"\"\" import os import collections import", "print(\"run_tests: render failed for\", name, func.__name__, \"line\", end) raise except StopIteration: break def", "names: for test in harvest(path, name, True): yield test def main(): for test", "not isinstance(items, collections.Iterator): yield TestRun(func, func.__code__.co_firstlineno, result=items) return start = items.gi_frame.f_lineno # index", "os.listdir(path) names = [name for name in names if name.endswith(\".py\") and name.startswith(\"test_\")] names.sort()", "name not in all_names, \"name dup: %r\"%name all_names.add(name) svgname = \"images/%s.svg\"%name pdfname =", "0, \"%r not understood\" % (result,) if not name: name = \"output-%d\"%counter counter", "counter = 0 def run_test(func, dummy=False): global counter items = func() if not", "break def harvest(path, name, dummy=False): print(\"run_tests.harvest\", name) assert name.endswith(\".py\") stem = name[:-len(\".py\")] desc", "run_test(func, dummy=dummy): yield test def run(): path = os.path.dirname(__file__) names = os.listdir(path) names", "items.gi_frame.f_lineno-1 # index test = TestRun(func, start, end, svgname) yield test start =", "def run(): path = os.path.dirname(__file__) names = os.listdir(path) names = [name for name", "m = getattr(bruhat.render.doc, stem) funcs = [] for attr in dir(m): value =", "True): yield test def main(): for test in run(): pass print(\"run_tests.main: finished\") if", "name) assert name.endswith(\".py\") stem = name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem __import__(desc) m = getattr(bruhat.render.doc,", "yield test def run(): path = os.path.dirname(__file__) names = os.listdir(path) names = [name", "end=None, img=None, result=None): self.func = func self.start = start self.end = end self.img", "\"%r not understood\" % (result,) if not name: name = \"output-%d\"%counter counter +=", "isinstance(result, Canvas): cvs = result else: assert 0, \"%r not understood\" % (result,)", "= \"output-%d\"%counter 
counter += 1 assert name not in all_names, \"name dup: %r\"%name", "= end+1 if dummy: svgname = \"/dev/null\" pdfname = \"/dev/null\" try: print(\"run_tests: rendering\",", "= Canvas() cvs.append(Scale(2.0)) box.render(cvs) else: cvs = Canvas([Scale(2.0), cvs]) cvs.writeSVGfile(svgname) cvs.writePDFfile(pdfname) print() except:", "items.gi_frame.f_lineno # index while 1: try: box = None cvs = None name", "= [] for attr in dir(m): value = getattr(m, attr) if attr.startswith(\"test_\") and", "def run_test(func, dummy=False): global counter items = func() if not isinstance(items, collections.Iterator): yield", "assert name.endswith(\".py\") stem = name[:-len(\".py\")] desc = \"bruhat.render.doc.\"+stem __import__(desc) m = getattr(bruhat.render.doc, stem)", "items.__next__() if isinstance(result, tuple): result, name = result if isinstance(result, Box): box =", "index test = TestRun(func, start, end, svgname) yield test start = end+1 if", "test in run_test(func, dummy=dummy): yield test def run(): path = os.path.dirname(__file__) names =", "Base from bruhat.render.box import Box class TestRun(Base): def __init__(self, func, start=None, end=None, img=None,", "#!/usr/bin/env python3 \"\"\" Note: use mkdoc.py to rebuild all docs & images. 
\"\"\"", "svgname = \"images/%s.svg\"%name pdfname = \"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1 # index test =", "pdfname = \"images/%s.pdf\"%name end = items.gi_frame.f_lineno-1 # index test = TestRun(func, start, end,", "and isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key = lambda f : (f.__module__, f.__code__.co_firstlineno)) for func", "in names if name.endswith(\".py\") and name.startswith(\"test_\")] names.sort() for name in names: for test", "cvs = None name = None result = items.__next__() if isinstance(result, tuple): result,", "in dir(m): value = getattr(m, attr) if attr.startswith(\"test_\") and isinstance(value, collections.Callable): funcs.append(value) funcs.sort(key", "name = None result = items.__next__() if isinstance(result, tuple): result, name = result", "in harvest(path, name, True): yield test def main(): for test in run(): pass", "end, svgname) yield test start = end+1 if dummy: svgname = \"/dev/null\" pdfname", "__init__(self, func, start=None, end=None, img=None, result=None): self.func = func self.start = start self.end", "# index while 1: try: box = None cvs = None name =" ]
[ "sum_03 = add(**values) if __name__ == '__main__': print('sum_01 is', sum_01) print('sum_02 is', sum_02)", "pass a tuple directly to the function \"\"\" tuple_value = (3, 4) sum_02", "\"y\":5} sum_03 = add(**values) if __name__ == '__main__': print('sum_01 is', sum_01) print('sum_02 is',", "the function \"\"\" tuple_value = (3, 4) sum_02 = add(*tuple_value) \"\"\"or we can", "sum_01 = add(2, 3) \"\"\" Gently, we can pass a tuple directly to", "add(2, 3) \"\"\" Gently, we can pass a tuple directly to the function", "shows how to gently make use of functions \"\"\" def add(x, y): return(x+y)", "# -*- coding: utf-8 -*- __author__ = '<NAME>' __maintainer__ = \"<NAME>\" __email__ =", "def add(x, y): return(x+y) \"\"\" In general, we use this function by this", "this function by this way \"\"\" sum_01 = add(2, 3) \"\"\" Gently, we", "License 2.0' __creation_date__= 'Dec. 25, 2018' \"\"\" This example shows how to gently", "In general, we use this function by this way \"\"\" sum_01 = add(2,", "= '<NAME>' __maintainer__ = \"<NAME>\" __email__ = '<EMAIL>' __license__ = 'Apache License 2.0'", "'Apache License 2.0' __creation_date__= 'Dec. 
25, 2018' \"\"\" This example shows how to", "= add(*tuple_value) \"\"\"or we can dict to pass the parameters\"\"\" values = {\"x\":3,", "add(x, y): return(x+y) \"\"\" In general, we use this function by this way", "example shows how to gently make use of functions \"\"\" def add(x, y):", "sum_02 = add(*tuple_value) \"\"\"or we can dict to pass the parameters\"\"\" values =", "tuple_value = (3, 4) sum_02 = add(*tuple_value) \"\"\"or we can dict to pass", "how to gently make use of functions \"\"\" def add(x, y): return(x+y) \"\"\"", "\"\"\"or we can dict to pass the parameters\"\"\" values = {\"x\":3, \"y\":5} sum_03", "utf-8 -*- __author__ = '<NAME>' __maintainer__ = \"<NAME>\" __email__ = '<EMAIL>' __license__ =", "pass the parameters\"\"\" values = {\"x\":3, \"y\":5} sum_03 = add(**values) if __name__ ==", "can pass a tuple directly to the function \"\"\" tuple_value = (3, 4)", "by this way \"\"\" sum_01 = add(2, 3) \"\"\" Gently, we can pass", "= \"<NAME>\" __email__ = '<EMAIL>' __license__ = 'Apache License 2.0' __creation_date__= 'Dec. 25,", "'Dec. 25, 2018' \"\"\" This example shows how to gently make use of", "to the function \"\"\" tuple_value = (3, 4) sum_02 = add(*tuple_value) \"\"\"or we", "4) sum_02 = add(*tuple_value) \"\"\"or we can dict to pass the parameters\"\"\" values", "way \"\"\" sum_01 = add(2, 3) \"\"\" Gently, we can pass a tuple", "\"\"\" In general, we use this function by this way \"\"\" sum_01 =", "\"\"\" This example shows how to gently make use of functions \"\"\" def", "add(**values) if __name__ == '__main__': print('sum_01 is', sum_01) print('sum_02 is', sum_02) print('sum_03 is',", "-*- coding: utf-8 -*- __author__ = '<NAME>' __maintainer__ = \"<NAME>\" __email__ = '<EMAIL>'", "python # -*- coding: utf-8 -*- __author__ = '<NAME>' __maintainer__ = \"<NAME>\" __email__", "= 'Apache License 2.0' __creation_date__= 'Dec. 
25, 2018' \"\"\" This example shows how", "parameters\"\"\" values = {\"x\":3, \"y\":5} sum_03 = add(**values) if __name__ == '__main__': print('sum_01", "we can dict to pass the parameters\"\"\" values = {\"x\":3, \"y\":5} sum_03 =", "coding: utf-8 -*- __author__ = '<NAME>' __maintainer__ = \"<NAME>\" __email__ = '<EMAIL>' __license__", "general, we use this function by this way \"\"\" sum_01 = add(2, 3)", "use of functions \"\"\" def add(x, y): return(x+y) \"\"\" In general, we use", "use this function by this way \"\"\" sum_01 = add(2, 3) \"\"\" Gently,", "__creation_date__= 'Dec. 25, 2018' \"\"\" This example shows how to gently make use", "\"\"\" sum_01 = add(2, 3) \"\"\" Gently, we can pass a tuple directly", "= '<EMAIL>' __license__ = 'Apache License 2.0' __creation_date__= 'Dec. 25, 2018' \"\"\" This", "the parameters\"\"\" values = {\"x\":3, \"y\":5} sum_03 = add(**values) if __name__ == '__main__':", "gently make use of functions \"\"\" def add(x, y): return(x+y) \"\"\" In general,", "function \"\"\" tuple_value = (3, 4) sum_02 = add(*tuple_value) \"\"\"or we can dict", "if __name__ == '__main__': print('sum_01 is', sum_01) print('sum_02 is', sum_02) print('sum_03 is', sum_03)", "\"<NAME>\" __email__ = '<EMAIL>' __license__ = 'Apache License 2.0' __creation_date__= 'Dec. 25, 2018'", "__author__ = '<NAME>' __maintainer__ = \"<NAME>\" __email__ = '<EMAIL>' __license__ = 'Apache License", "y): return(x+y) \"\"\" In general, we use this function by this way \"\"\"", "2.0' __creation_date__= 'Dec. 
25, 2018' \"\"\" This example shows how to gently make", "\"\"\" tuple_value = (3, 4) sum_02 = add(*tuple_value) \"\"\"or we can dict to", "3) \"\"\" Gently, we can pass a tuple directly to the function \"\"\"", "-*- __author__ = '<NAME>' __maintainer__ = \"<NAME>\" __email__ = '<EMAIL>' __license__ = 'Apache", "\"\"\" def add(x, y): return(x+y) \"\"\" In general, we use this function by", "to pass the parameters\"\"\" values = {\"x\":3, \"y\":5} sum_03 = add(**values) if __name__", "= add(**values) if __name__ == '__main__': print('sum_01 is', sum_01) print('sum_02 is', sum_02) print('sum_03", "__maintainer__ = \"<NAME>\" __email__ = '<EMAIL>' __license__ = 'Apache License 2.0' __creation_date__= 'Dec.", "function by this way \"\"\" sum_01 = add(2, 3) \"\"\" Gently, we can", "__license__ = 'Apache License 2.0' __creation_date__= 'Dec. 25, 2018' \"\"\" This example shows", "values = {\"x\":3, \"y\":5} sum_03 = add(**values) if __name__ == '__main__': print('sum_01 is',", "to gently make use of functions \"\"\" def add(x, y): return(x+y) \"\"\" In", "this way \"\"\" sum_01 = add(2, 3) \"\"\" Gently, we can pass a", "(3, 4) sum_02 = add(*tuple_value) \"\"\"or we can dict to pass the parameters\"\"\"", "= add(2, 3) \"\"\" Gently, we can pass a tuple directly to the", "functions \"\"\" def add(x, y): return(x+y) \"\"\" In general, we use this function", "= (3, 4) sum_02 = add(*tuple_value) \"\"\"or we can dict to pass the", "Gently, we can pass a tuple directly to the function \"\"\" tuple_value =", "'<EMAIL>' __license__ = 'Apache License 2.0' __creation_date__= 'Dec. 
25, 2018' \"\"\" This example", "a tuple directly to the function \"\"\" tuple_value = (3, 4) sum_02 =", "directly to the function \"\"\" tuple_value = (3, 4) sum_02 = add(*tuple_value) \"\"\"or", "= {\"x\":3, \"y\":5} sum_03 = add(**values) if __name__ == '__main__': print('sum_01 is', sum_01)", "This example shows how to gently make use of functions \"\"\" def add(x,", "make use of functions \"\"\" def add(x, y): return(x+y) \"\"\" In general, we", "{\"x\":3, \"y\":5} sum_03 = add(**values) if __name__ == '__main__': print('sum_01 is', sum_01) print('sum_02", "25, 2018' \"\"\" This example shows how to gently make use of functions", "return(x+y) \"\"\" In general, we use this function by this way \"\"\" sum_01", "__email__ = '<EMAIL>' __license__ = 'Apache License 2.0' __creation_date__= 'Dec. 25, 2018' \"\"\"", "of functions \"\"\" def add(x, y): return(x+y) \"\"\" In general, we use this", "\"\"\" Gently, we can pass a tuple directly to the function \"\"\" tuple_value", "tuple directly to the function \"\"\" tuple_value = (3, 4) sum_02 = add(*tuple_value)", "can dict to pass the parameters\"\"\" values = {\"x\":3, \"y\":5} sum_03 = add(**values)", "'<NAME>' __maintainer__ = \"<NAME>\" __email__ = '<EMAIL>' __license__ = 'Apache License 2.0' __creation_date__=", "we use this function by this way \"\"\" sum_01 = add(2, 3) \"\"\"", "dict to pass the parameters\"\"\" values = {\"x\":3, \"y\":5} sum_03 = add(**values) if", "add(*tuple_value) \"\"\"or we can dict to pass the parameters\"\"\" values = {\"x\":3, \"y\":5}", "we can pass a tuple directly to the function \"\"\" tuple_value = (3,", "2018' \"\"\" This example shows how to gently make use of functions \"\"\"", "#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = '<NAME>' __maintainer__ = \"<NAME>\"" ]
[ "\"Java-12 Path\" jar = \"Path to jar file\" repository = \"Path to input", "jar = \"Path to jar file\" repository = \"Path to input ontologies\" outRep", "emptied and recycled\") for file in os.listdir(outDir): file_path = os.path.join(outDir, file) try: if", "!= \"output.log\": os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) print(name +", "directory\" experimentScript = \"Path to the single experiment script\" coverage=100 for i in", "summaryPath = outRep + \"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary file exists. Statistics", "Exception as e: print(e) print(name + \" started\") with open(outDir + '/output.log', 'w')", "os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) print(name + \" started\") with open(outDir", "name os.makedirs(outDir) try: os.makedirs(outDir) except FileExistsError: print (\"Output directory exists. Directory will be", "-repo {repo} -outFile {outFile}'.format(java=java, jar=jar, prog=\"uk.ac.man.OfflineSummaryGenerator\", repo=outRep, outFile=summaryPath) print(summaryCmd, flush=True) returned_value = subprocess.call(summaryCmd,shell=True)", "elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) print(name + \" started\") with", "'{java} -cp {jar} {prog} -repo {repo} -outFile {outFile}'.format(java=java, jar=jar, prog=\"uk.ac.man.OfflineSummaryGenerator\", repo=outRep, outFile=summaryPath) print(summaryCmd,", "= \"experiment\" + str(i) outDir = outRep + name os.makedirs(outDir) try: os.makedirs(outDir) except", "Time,Excluded Clauses,Lethe Time\") summaryFile.close() summaryCmd = '{java} -cp {jar} {prog} -repo {repo} -outFile", "\"python {script} -n {name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out) print(name + \"", "\" started\") with open(outDir + '/output.log', 'w') as out: 
executorCmd = \"python {script}", "import os import subprocess import shutil java = \"Java-12 Path\" jar = \"Path", "complete\") summaryPath = outRep + \"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary file exists.", "-cp {jar} {prog} -repo {repo} -outFile {outFile}'.format(java=java, jar=jar, prog=\"uk.ac.man.OfflineSummaryGenerator\", repo=outRep, outFile=summaryPath) print(summaryCmd, flush=True)", "(\"Output directory exists. Directory will be emptied and recycled\") for file in os.listdir(outDir):", "file) try: if os.path.isfile(file_path): if file != \"output.log\": os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except", "+ \" complete\") summaryPath = outRep + \"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary", "exists. Directory will be emptied and recycled\") for file in os.listdir(outDir): file_path =", "\"experiment\" + str(i) outDir = outRep + name os.makedirs(outDir) try: os.makedirs(outDir) except FileExistsError:", "Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe Time\") summaryFile.close() summaryCmd = '{java} -cp {jar} {prog}", "Time,Reduction Time,Excluded Clauses,Lethe Time\") summaryFile.close() summaryCmd = '{java} -cp {jar} {prog} -repo {repo}", "Size,Different Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe Time\") summaryFile.close() summaryCmd = '{java} -cp {jar}", "if os.path.isfile(file_path): if file != \"output.log\": os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as", "= os.path.join(outDir, file) try: if os.path.isfile(file_path): if file != \"output.log\": os.unlink(file_path) elif os.path.isdir(file_path):", "exists. 
Statistics will be appended to file.\") summaryFile = open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig", "os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) print(name + \" started\")", "file.\") summaryFile = open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe", "= open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe Time\") summaryFile.close()", "-n {name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out) print(name + \" complete\") summaryPath", "FileExistsError: print (\"Output directory exists. Directory will be emptied and recycled\") for file", "\"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe Time\") summaryFile.close() summaryCmd =", "file in os.listdir(outDir): file_path = os.path.join(outDir, file) try: if os.path.isfile(file_path): if file !=", "will be appended to file.\") summaryFile = open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic", "if os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary file exists. 
Statistics will be appended to file.\")", "if file != \"output.log\": os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e)", "input ontologies\" outRep = \"Output directory\" experimentScript = \"Path to the single experiment", "shutil.rmtree(file_path) except Exception as e: print(e) print(name + \" started\") with open(outDir +", "\"Path to jar file\" repository = \"Path to input ontologies\" outRep = \"Output", "str(i) outDir = outRep + name os.makedirs(outDir) try: os.makedirs(outDir) except FileExistsError: print (\"Output", "to the single experiment script\" coverage=100 for i in range(1, 91): name =", "os.unlink(summaryPath) # print(\"Summary file exists. Statistics will be appended to file.\") summaryFile =", "+ name os.makedirs(outDir) try: os.makedirs(outDir) except FileExistsError: print (\"Output directory exists. Directory will", "in os.listdir(outDir): file_path = os.path.join(outDir, file) try: if os.path.isfile(file_path): if file != \"output.log\":", "and recycled\") for file in os.listdir(outDir): file_path = os.path.join(outDir, file) try: if os.path.isfile(file_path):", "import subprocess import shutil java = \"Java-12 Path\" jar = \"Path to jar", "name = \"experiment\" + str(i) outDir = outRep + name os.makedirs(outDir) try: os.makedirs(outDir)", "+ str(i) outDir = outRep + name os.makedirs(outDir) try: os.makedirs(outDir) except FileExistsError: print", "summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe Time\") summaryFile.close() summaryCmd = '{java}", "print(name + \" started\") with open(outDir + '/output.log', 'w') as out: executorCmd =", "+ '/output.log', 'w') as out: executorCmd = \"python {script} -n {name} -c {coverage}}\".format(script=experimentScript,", "\"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary file exists. 
Statistics will be appended to", "to file.\") summaryFile = open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic View Time,Reduction Time,Excluded", "import sys import os import subprocess import shutil java = \"Java-12 Path\" jar", "script\" coverage=100 for i in range(1, 91): name = \"experiment\" + str(i) outDir", "= \"python {script} -n {name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out) print(name +", "experimentScript = \"Path to the single experiment script\" coverage=100 for i in range(1,", "import shutil java = \"Java-12 Path\" jar = \"Path to jar file\" repository", "Size,Sig Size,Different Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe Time\") summaryFile.close() summaryCmd = '{java} -cp", "= '{java} -cp {jar} {prog} -repo {repo} -outFile {outFile}'.format(java=java, jar=jar, prog=\"uk.ac.man.OfflineSummaryGenerator\", repo=outRep, outFile=summaryPath)", "shutil java = \"Java-12 Path\" jar = \"Path to jar file\" repository =", "single experiment script\" coverage=100 for i in range(1, 91): name = \"experiment\" +", "to input ontologies\" outRep = \"Output directory\" experimentScript = \"Path to the single", "= \"Path to the single experiment script\" coverage=100 for i in range(1, 91):", "Path\" jar = \"Path to jar file\" repository = \"Path to input ontologies\"", "in range(1, 91): name = \"experiment\" + str(i) outDir = outRep + name", "91): name = \"experiment\" + str(i) outDir = outRep + name os.makedirs(outDir) try:", "directory exists. 
Directory will be emptied and recycled\") for file in os.listdir(outDir): file_path", "stderr=out) print(name + \" complete\") summaryPath = outRep + \"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath)", "outDir = outRep + name os.makedirs(outDir) try: os.makedirs(outDir) except FileExistsError: print (\"Output directory", "summaryCmd = '{java} -cp {jar} {prog} -repo {repo} -outFile {outFile}'.format(java=java, jar=jar, prog=\"uk.ac.man.OfflineSummaryGenerator\", repo=outRep,", "\"Output directory\" experimentScript = \"Path to the single experiment script\" coverage=100 for i", "experiment script\" coverage=100 for i in range(1, 91): name = \"experiment\" + str(i)", "{coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out) print(name + \" complete\") summaryPath = outRep +", "os.makedirs(outDir) try: os.makedirs(outDir) except FileExistsError: print (\"Output directory exists. Directory will be emptied", "\"Path to input ontologies\" outRep = \"Output directory\" experimentScript = \"Path to the", "executorCmd = \"python {script} -n {name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out) print(name", "Clauses,Lethe Time\") summaryFile.close() summaryCmd = '{java} -cp {jar} {prog} -repo {repo} -outFile {outFile}'.format(java=java,", "{prog} -repo {repo} -outFile {outFile}'.format(java=java, jar=jar, prog=\"uk.ac.man.OfflineSummaryGenerator\", repo=outRep, outFile=summaryPath) print(summaryCmd, flush=True) returned_value =", "= \"Java-12 Path\" jar = \"Path to jar file\" repository = \"Path to", "range(1, 91): name = \"experiment\" + str(i) outDir = outRep + name os.makedirs(outDir)", "= \"Output directory\" experimentScript = \"Path to the single experiment script\" coverage=100 for", "e: print(e) print(name + \" started\") with open(outDir + '/output.log', 'w') as out:", "'w') as out: 
executorCmd = \"python {script} -n {name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd,", "sys import os import subprocess import shutil java = \"Java-12 Path\" jar =", "as out: executorCmd = \"python {script} -n {name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out,", "print(\"Summary file exists. Statistics will be appended to file.\") summaryFile = open(summaryPath, \"w+\")", "= \"Path to jar file\" repository = \"Path to input ontologies\" outRep =", "{name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out) print(name + \" complete\") summaryPath =", "print (\"Output directory exists. Directory will be emptied and recycled\") for file in", "file exists. Statistics will be appended to file.\") summaryFile = open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O", "outRep + name os.makedirs(outDir) try: os.makedirs(outDir) except FileExistsError: print (\"Output directory exists. Directory", "outRep + \"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary file exists. Statistics will be", "to jar file\" repository = \"Path to input ontologies\" outRep = \"Output directory\"", "= outRep + \"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary file exists. 
Statistics will", "summaryFile.close() summaryCmd = '{java} -cp {jar} {prog} -repo {repo} -outFile {outFile}'.format(java=java, jar=jar, prog=\"uk.ac.man.OfflineSummaryGenerator\",", "recycled\") for file in os.listdir(outDir): file_path = os.path.join(outDir, file) try: if os.path.isfile(file_path): if", "print(e) print(name + \" started\") with open(outDir + '/output.log', 'w') as out: executorCmd", "\"Path to the single experiment script\" coverage=100 for i in range(1, 91): name", "as e: print(e) print(name + \" started\") with open(outDir + '/output.log', 'w') as", "os.path.join(outDir, file) try: if os.path.isfile(file_path): if file != \"output.log\": os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path)", "with open(outDir + '/output.log', 'w') as out: executorCmd = \"python {script} -n {name}", "os.makedirs(outDir) except FileExistsError: print (\"Output directory exists. Directory will be emptied and recycled\")", "Statistics will be appended to file.\") summaryFile = open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different", "java = \"Java-12 Path\" jar = \"Path to jar file\" repository = \"Path", "try: os.makedirs(outDir) except FileExistsError: print (\"Output directory exists. Directory will be emptied and", "os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary file exists. 
Statistics will be appended to file.\") summaryFile", "the single experiment script\" coverage=100 for i in range(1, 91): name = \"experiment\"", "be emptied and recycled\") for file in os.listdir(outDir): file_path = os.path.join(outDir, file) try:", "stdout=out, stderr=out) print(name + \" complete\") summaryPath = outRep + \"summary.csv\" if os.path.isfile(summaryPath):", "will be emptied and recycled\") for file in os.listdir(outDir): file_path = os.path.join(outDir, file)", "os import subprocess import shutil java = \"Java-12 Path\" jar = \"Path to", "name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out) print(name + \" complete\") summaryPath = outRep + \"summary.csv\"", "except Exception as e: print(e) print(name + \" started\") with open(outDir + '/output.log',", "os.path.isfile(file_path): if file != \"output.log\": os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e:", "# print(\"Summary file exists. Statistics will be appended to file.\") summaryFile = open(summaryPath,", "coverage=100 for i in range(1, 91): name = \"experiment\" + str(i) outDir =", "subprocess import shutil java = \"Java-12 Path\" jar = \"Path to jar file\"", "View Time,Reduction Time,Excluded Clauses,Lethe Time\") summaryFile.close() summaryCmd = '{java} -cp {jar} {prog} -repo", "open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe Time\") summaryFile.close() summaryCmd", "outRep = \"Output directory\" experimentScript = \"Path to the single experiment script\" coverage=100", "ontologies\" outRep = \"Output directory\" experimentScript = \"Path to the single experiment script\"", "= \"Path to input ontologies\" outRep = \"Output directory\" experimentScript = \"Path to", "for file in os.listdir(outDir): file_path = os.path.join(outDir, file) try: if os.path.isfile(file_path): if file", "subprocess.call(executorCmd, 
stdout=out, stderr=out) print(name + \" complete\") summaryPath = outRep + \"summary.csv\" if", "{jar} {prog} -repo {repo} -outFile {outFile}'.format(java=java, jar=jar, prog=\"uk.ac.man.OfflineSummaryGenerator\", repo=outRep, outFile=summaryPath) print(summaryCmd, flush=True) returned_value", "be appended to file.\") summaryFile = open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic View", "Directory will be emptied and recycled\") for file in os.listdir(outDir): file_path = os.path.join(outDir,", "Time\") summaryFile.close() summaryCmd = '{java} -cp {jar} {prog} -repo {repo} -outFile {outFile}'.format(java=java, jar=jar,", "-c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out) print(name + \" complete\") summaryPath = outRep", "except FileExistsError: print (\"Output directory exists. Directory will be emptied and recycled\") for", "file_path = os.path.join(outDir, file) try: if os.path.isfile(file_path): if file != \"output.log\": os.unlink(file_path) elif", "= outRep + name os.makedirs(outDir) try: os.makedirs(outDir) except FileExistsError: print (\"Output directory exists.", "started\") with open(outDir + '/output.log', 'w') as out: executorCmd = \"python {script} -n", "\"output.log\": os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) print(name + \"", "for i in range(1, 91): name = \"experiment\" + str(i) outDir = outRep", "repository = \"Path to input ontologies\" outRep = \"Output directory\" experimentScript = \"Path", "{script} -n {name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out) print(name + \" complete\")", "'/output.log', 'w') as out: executorCmd = \"python {script} -n {name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage)", "i in range(1, 91): name = 
\"experiment\" + str(i) outDir = outRep +", "os.listdir(outDir): file_path = os.path.join(outDir, file) try: if os.path.isfile(file_path): if file != \"output.log\": os.unlink(file_path)", "file\" repository = \"Path to input ontologies\" outRep = \"Output directory\" experimentScript =", "jar file\" repository = \"Path to input ontologies\" outRep = \"Output directory\" experimentScript", "+ \" started\") with open(outDir + '/output.log', 'w') as out: executorCmd = \"python", "open(outDir + '/output.log', 'w') as out: executorCmd = \"python {script} -n {name} -c", "file != \"output.log\": os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) print(name", "appended to file.\") summaryFile = open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic View Time,Reduction", "<filename>scripts/batchOfflineExperiment.py import sys import os import subprocess import shutil java = \"Java-12 Path\"", "try: if os.path.isfile(file_path): if file != \"output.log\": os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception", "\" complete\") summaryPath = outRep + \"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary file", "summaryFile = open(summaryPath, \"w+\") summaryFile.write(\"Experiment,O Size,Sig Size,Different Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe Time\")", "+ \"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath) # print(\"Summary file exists. Statistics will be appended", "print(name + \" complete\") summaryPath = outRep + \"summary.csv\" if os.path.isfile(summaryPath): os.unlink(summaryPath) #", "out: executorCmd = \"python {script} -n {name} -c {coverage}}\".format(script=experimentScript, name=name,coverage=coverage) subprocess.call(executorCmd, stdout=out, stderr=out)" ]
[ "MakeRequest from mussel.scheme.api import Interface class TestMakeAPIRequests: def test_can_be_instantiated(self): mr = MakeRequest() assert", "isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\", [ Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"), Interface(\"head\", \"url\"), Interface(\"options\", \"url\"),", "coding: utf-8 -*- \"\"\" @author: <NAME> @email: <EMAIL> @time: 2021/12/17 20:25 \"\"\" from", "\"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\") def test_http_method_calls_correct_session_method(self, mocked_session, interface): mar = MakeRequest() mar.send(interface) getattr(mar.session,", "\"\"\" @author: <NAME> @email: <EMAIL> @time: 2021/12/17 20:25 \"\"\" from unittest import mock", "\"interface\", [ Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"), Interface(\"head\", \"url\"), Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"), Interface(\"post\",", "import Interface class TestMakeAPIRequests: def test_can_be_instantiated(self): mr = MakeRequest() assert isinstance(mr, MakeRequest) @pytest.mark.parametrize(", "mr = MakeRequest() assert isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\", [ Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"),", "Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"), Interface(\"head\", \"url\"), Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"), Interface(\"put\",", "utf-8 -*- \"\"\" @author: <NAME> @email: <EMAIL> @time: 2021/12/17 20:25 \"\"\" from unittest", "20:25 \"\"\" from unittest import mock import pytest from mussel.core.make_requests import MakeRequest from", "@author: <NAME> @email: <EMAIL> @time: 2021/12/17 20:25 \"\"\" from unittest import mock import", "Interface(\"put\", \"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\") def test_http_method_calls_correct_session_method(self, mocked_session, interface): mar = MakeRequest() mar.send(interface)", 
"\"url\"), Interface(\"head\", \"url\"), Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"), Interface(\"put\", \"url\"), ], )", "\"url\"), Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"), Interface(\"put\", \"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\") def", "<EMAIL> @time: 2021/12/17 20:25 \"\"\" from unittest import mock import pytest from mussel.core.make_requests", "from mussel.core.make_requests import MakeRequest from mussel.scheme.api import Interface class TestMakeAPIRequests: def test_can_be_instantiated(self): mr", "@time: 2021/12/17 20:25 \"\"\" from unittest import mock import pytest from mussel.core.make_requests import", "@email: <EMAIL> @time: 2021/12/17 20:25 \"\"\" from unittest import mock import pytest from", "\"\"\" from unittest import mock import pytest from mussel.core.make_requests import MakeRequest from mussel.scheme.api", "from unittest import mock import pytest from mussel.core.make_requests import MakeRequest from mussel.scheme.api import", "\"url\"), Interface(\"get\", \"url\"), Interface(\"head\", \"url\"), Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"), Interface(\"put\", \"url\"),", "\"url\"), Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"), Interface(\"put\", \"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\") def test_http_method_calls_correct_session_method(self, mocked_session,", "-*- coding: utf-8 -*- \"\"\" @author: <NAME> @email: <EMAIL> @time: 2021/12/17 20:25 \"\"\"", "Interface(\"post\", \"url\"), Interface(\"put\", \"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\") def test_http_method_calls_correct_session_method(self, mocked_session, interface): mar =", "-*- \"\"\" @author: <NAME> @email: <EMAIL> @time: 2021/12/17 20:25 \"\"\" from unittest import", "from mussel.scheme.api import Interface class TestMakeAPIRequests: def 
test_can_be_instantiated(self): mr = MakeRequest() assert isinstance(mr,", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" @author: <NAME> @email: <EMAIL> @time:", "Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"), Interface(\"put\", \"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\") def test_http_method_calls_correct_session_method(self, mocked_session, interface):", "Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"), Interface(\"put\", \"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\") def test_http_method_calls_correct_session_method(self,", "\"url\"), Interface(\"put\", \"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\") def test_http_method_calls_correct_session_method(self, mocked_session, interface): mar = MakeRequest()", "@pytest.mark.parametrize( \"interface\", [ Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"), Interface(\"head\", \"url\"), Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"),", "MakeRequest() assert isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\", [ Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"), Interface(\"head\", \"url\"),", "[ Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"), Interface(\"head\", \"url\"), Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"),", "Interface class TestMakeAPIRequests: def test_can_be_instantiated(self): mr = MakeRequest() assert isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\",", "Interface(\"head\", \"url\"), Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"), Interface(\"put\", \"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\")", "unittest import mock import pytest from mussel.core.make_requests import MakeRequest from mussel.scheme.api import Interface", "pytest from mussel.core.make_requests import MakeRequest from 
mussel.scheme.api import Interface class TestMakeAPIRequests: def test_can_be_instantiated(self):", "Interface(\"get\", \"url\"), Interface(\"head\", \"url\"), Interface(\"options\", \"url\"), Interface(\"patch\", \"url\"), Interface(\"post\", \"url\"), Interface(\"put\", \"url\"), ],", "mussel.core.make_requests import MakeRequest from mussel.scheme.api import Interface class TestMakeAPIRequests: def test_can_be_instantiated(self): mr =", "2021/12/17 20:25 \"\"\" from unittest import mock import pytest from mussel.core.make_requests import MakeRequest", "<NAME> @email: <EMAIL> @time: 2021/12/17 20:25 \"\"\" from unittest import mock import pytest", "\"url\"), Interface(\"post\", \"url\"), Interface(\"put\", \"url\"), ], ) @mock.patch(\"mussel.core.make_requests.Session\") def test_http_method_calls_correct_session_method(self, mocked_session, interface): mar", "import MakeRequest from mussel.scheme.api import Interface class TestMakeAPIRequests: def test_can_be_instantiated(self): mr = MakeRequest()", "mussel.scheme.api import Interface class TestMakeAPIRequests: def test_can_be_instantiated(self): mr = MakeRequest() assert isinstance(mr, MakeRequest)", "], ) @mock.patch(\"mussel.core.make_requests.Session\") def test_http_method_calls_correct_session_method(self, mocked_session, interface): mar = MakeRequest() mar.send(interface) getattr(mar.session, interface.method).assert_called_once()", "mock import pytest from mussel.core.make_requests import MakeRequest from mussel.scheme.api import Interface class TestMakeAPIRequests:", "python3 # -*- coding: utf-8 -*- \"\"\" @author: <NAME> @email: <EMAIL> @time: 2021/12/17", "TestMakeAPIRequests: def test_can_be_instantiated(self): mr = MakeRequest() assert isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\", [ Interface(\"delete\",", "import pytest from mussel.core.make_requests import MakeRequest from mussel.scheme.api import Interface class TestMakeAPIRequests: def", "class TestMakeAPIRequests: def 
test_can_be_instantiated(self): mr = MakeRequest() assert isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\", [", "test_can_be_instantiated(self): mr = MakeRequest() assert isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\", [ Interface(\"delete\", \"url\"), Interface(\"get\",", "assert isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\", [ Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"), Interface(\"head\", \"url\"), Interface(\"options\",", "def test_can_be_instantiated(self): mr = MakeRequest() assert isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\", [ Interface(\"delete\", \"url\"),", "MakeRequest) @pytest.mark.parametrize( \"interface\", [ Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"), Interface(\"head\", \"url\"), Interface(\"options\", \"url\"), Interface(\"patch\",", "= MakeRequest() assert isinstance(mr, MakeRequest) @pytest.mark.parametrize( \"interface\", [ Interface(\"delete\", \"url\"), Interface(\"get\", \"url\"), Interface(\"head\",", "# -*- coding: utf-8 -*- \"\"\" @author: <NAME> @email: <EMAIL> @time: 2021/12/17 20:25", "import mock import pytest from mussel.core.make_requests import MakeRequest from mussel.scheme.api import Interface class" ]
[ "# We found 6 out of 8 indices in the support. Here are", "the maximum tolerance for residual norm res_norm_rtol = 1e-3 max_r_norm_sqr = y_norm_sqr *", "%% # Compute the residual and verify that it is still larger than", "algorithm steps by comparing the estimates # with actual ``x0`` and ``omega``. However,", "for sparse recovery. It then shows how to use the official implementation of", "the residual and verify that it is still larger than the allowed tolerance", "= x_sub[Ia] # %% # We now have our first estimate of the", "algorithm converged. print(solution.r_norm_sqr, solution.iterations) # %% # Let's plot the solution x =", "has been used for data measurements. * A measurement vector ``y``. * The", "Here is our updated estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi=", "# The algorithm has no direct way of knowing that it indeed found", "missing) # %% # It is time to compute the residual after the", "Number of iterations completed so far iterations = 0 # %% # A", "%% # Indeed we did. The set difference is empty. print(\"Missing in I_sub:", "``I_sub`` Phi_sub = Phi[:, flags] # %% # Compute the least squares solution", "= flags.at[I].set(True) print(jnp.where(flags)) # %% # Mark the completion of the iteration iterations", "r_norm_sqr = float(r.T @ r) # It turns out that it is now", "array to track the indices selected for least squares steps flags = jnp.zeros(N,", "can stop iterating now. iterations += 1 # %% # CR-Sparse official implementation", "print(I) # %% # The non-zero values on the support x_I = solution.x_I", "there. # # Initialization # '''''''''''''''''''''''''''''''''''''''''''' # %% # We assume the initial", "any time, up to 3K atoms may be selected (after the merge step).", "the support x_I = solution.x_I print(x_I) # %% # Verify that we successfully", "vector is there. 
# # Initialization # '''''''''''''''''''''''''''''''''''''''''''' # %% # We assume", "array flags = flags.at[I_sub].set(True) # Sort the ``I_sub`` array with the help of", "it is still larger than the allowed tolerance r_norm_sqr = float(r.T @ r)", "print(jnp.where(flags)) # %% # Mark the completion of the iteration iterations += 1", "non-zero values on the support x_I = solution.x_I print(x_I) # %% # Verify", "= 0 # %% # A limit on the maximum tolerance for residual", "of CoSaMP (Compressive Sensing Matching Pursuit) algorithm for sparse recovery. It then shows", "# progress made by the algorithm steps by comparing the estimates # with", "picking the correct indices from the actual support of the signal found =", "x``. Each iteration of the algorithm successively improves the estimate ``x`` so that", "in first iteration. print(f\"{I_sub=}\") # %% # Check which indices from ``omega`` are", "the current residual with the atoms in ``Phi`` h = Phi.T @ r", "flags.at[:].set(False) flags = flags.at[I].set(True) print(jnp.where(flags)) # %% # Mark the completion of the", "# Compute the residual and verify that it is now below the allowed", "Setup # ------------------ # Number of measurements M = 128 # Ambient dimension", "the algorithm steps by comparing the estimates # with actual ``x0`` and ``omega``.", "that we successfully recovered the support print(jnp.setdiff1d(omega, I)) # %% # Print the", "print(\"Missing in I_sub: \", jnp.setdiff1d(omega, I_sub)) # %% # Select the subdictionary of", "# Coherence of atoms in the sensing matrix print(crdict.coherence(Phi)) # %% # A", "import necessary libraries import jax from jax import random import jax.numpy as jnp", "be more aggressive # and pick 3K atoms in first iteration. 
print(f\"{I_sub=}\") #", "I_sub = crs.largest_indices(h, K3) # Update the flags array flags = flags.at[I_sub].set(True) #", "= float(r.T @ r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") # %% # Store the selected", "``Phi`` consisting of atoms indexed by ``I_sub`` Phi_sub = Phi[:, flags] # %%", "the support. Here are the remaining. missing = jnp.setdiff1d(omega, I) print(\"Missing indices: \",", "largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(Ia) # %% #", "zero and # the residual ``r = y - Phi x`` to equal", "print(x_I) # %% # Verify that we successfully recovered the support print(jnp.setdiff1d(omega, I))", "the flags array flags = flags.at[I_sub].set(True) # Sort the ``I_sub`` array with the", "the LS solution x_I = x_sub[Ia] # %% # We now have our", "# Squared norm/energy of the residual y_norm_sqr = float(y.T @ y) r_norm_sqr =", "``r`` reduces. The algorithm proceeds as follows: * Initialize the solution ``x`` with", "the # progress made by the algorithm steps by comparing the estimates #", "print(jnp.intersect1d(omega, I_2k)) # %% # Merge (union) the set of previous K indices", "the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original vector\")", "8 indices in the support. Here are the remaining. missing = jnp.setdiff1d(omega, I)", "K) # The support for the sparse solution I = solution.I print(I) #", "# Pick the indices of K largest entries in in ``x_sub`` Ia =", "can be more aggressive # and pick 3K atoms in first iteration. print(f\"{I_sub=}\")", "original model # vector is there. # # Initialization # '''''''''''''''''''''''''''''''''''''''''''' # %%", "x`` to equal the measurements ``y`` r = y # %% # Squared", "by step ========================== This example explains the step by step development of CoSaMP", "in ``I`` to form ``I_sub``. 
* **LS**: Compute the least squares solution of", "K) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0) # %% # ``omega`` contains the", "%% # Development of CoSaMP algorithm # --------------------------------- # In the following, we", "``x_sub`` Ia = crs.largest_indices(x_sub, K) print(Ia) # %% # We need to map", "%% # Print the residual energy and the number of iterations when the", "of the iteration iterations += 1 # %% # Second iteration # ''''''''''''''''''''''''''''''''''''''''''''", "corresponding values from the LS solution x_I = x_sub[Ia] # %% # Here", "* (res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\") # %% # First iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First", "indexed by ``I_sub`` Phi_sub = Phi[:, flags] # %% # Compute the least", "vector # '''''''''''''''''''''''''' x0, omega = crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6), dpi= 100, facecolor='w',", "in ``Phi`` h = Phi.T @ r # %% # Pick the indices", "2K atoms with largest matches with the residual I_2k = crs.largest_indices(h, K2 if", "``x``) print(omega) # %% # Compressive measurements # '''''''''''''''''''''''''' y = Phi @", "be selected (after the merge step). K3 = K + K2 # %%", "# Pick the indices of 3K atoms with largest matches with the residual", "an index set ``I`` (initially empty) of atoms selected as part of the", "tolerance r_norm_sqr = float(r.T @ r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") # %% # Store", "# Pick the indices of 2K atoms with largest matches with the residual", "are there in ``I_sub``. print(jnp.intersect1d(omega, I_sub)) # %% # Select the subdictionary of", "and keep them in ``I``. 
* **Update residual**: Compute ``r = y -", "# Some keys for generating random numbers key = random.PRNGKey(0) keys = random.split(key,", "# Initialization # '''''''''''''''''''''''''''''''''''''''''''' # %% # We assume the initial solution to", "= 256 # Sparsity level K = 8 # %% # The Sparsifying", "The algorithm proceeds as follows: * Initialize the solution ``x`` with zero. *", "4) # For plotting diagrams import matplotlib.pyplot as plt # CR-Sparse modules import", "include the atoms missed out in first iteration. print(jnp.intersect1d(omega, I_2k)) # %% #", "explains the step by step development of CoSaMP (Compressive Sensing Matching Pursuit) algorithm", "now have our first estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi=", "the indices in ``omega`` jnp.setdiff1d(omega, I) # %% # Select the corresponding values", "iteration. print(f\"{I_sub=}\") # %% # Check which indices from ``omega`` are there in", "# Development of CoSaMP algorithm # --------------------------------- # In the following, we walk", "plt.legend() # %% # We can check how good we were in picking", "sparse solution I = solution.I print(I) # %% # The non-zero values on", "# %% # We need to map the indices in ``Ia`` to the", "solver solution = cosamp.matrix_solve_jit(Phi, y, K) # The support for the sparse solution", "the step by step development of CoSaMP (Compressive Sensing Matching Pursuit) algorithm for", "# Check if the final K indices in ``I`` include all the indices", "key = random.PRNGKey(0) keys = random.split(key, 4) # For plotting diagrams import matplotlib.pyplot", "crs.largest_indices(x_sub, K) print(f\"{Ia=}\") # %% # We need to map the indices in", "= jnp.where(flags) print(f\"{I_sub=}\") # %% # We can check if we found all", "there in ``I_sub``. 
print(jnp.intersect1d(omega, I_sub)) # %% # Select the subdictionary of ``Phi``", "direct way of knowing that it indeed found the solution # It is", "corresponding values from the LS solution x_I = x_sub[Ia] # %% # We", "indices at which x is nonzero (support of ``x``) print(omega) # %% #", "``x`` so that the energy of the residual ``r`` reduces. The algorithm proceeds", "initial solution to be zero and # the residual ``r = y -", "Select the indices of 2K atoms from ``Phi`` with the largest correlation with", "plt # CR-Sparse modules import cr.sparse as crs import cr.sparse.dict as crdict import", "for data measurements. * A measurement vector ``y``. * The sparsity level ``K``.", "``x`` such that ``y`` is approximately equal to ``Phi x``. A key quantity", "128 # Ambient dimension N = 256 # Sparsity level K = 8", "see the algorithm in action. \"\"\" # %% # Let's import necessary libraries", "256 # Sparsity level K = 8 # %% # The Sparsifying Basis", "is available in # ``cr.sparse.pursuit.cosamp`` module. # %% # Import the module from", "%% # Here is our updated estimate of the solution x = jnp.zeros(N).at[I].set(x_I)", "vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # The algorithm has no", "completion of the iteration iterations += 1 # %% # Second iteration #", "in picking the correct indices from the actual support of the signal found", "empty. print(\"Missing in I_sub: \", jnp.setdiff1d(omega, I_sub)) # %% # Select the subdictionary", "of each atom in ``Phi`` with the current residual ``r``. * **Identify**: Select", "signal recovery. We can stop iterating now. iterations += 1 # %% #", "have access to ``x0`` and ``omega``, we can measure the # progress made", "implementation of CoSaMP in ``CR-Sparse``. 
The CoSaMP algorithm has following inputs: * A", "* A sensing matrix or dictionary ``Phi`` which has been used for data", "%% # Number of iterations completed so far iterations = 0 # %%", "``I`` (initially empty) of atoms selected as part of the solution. * While", "equal the measurements ``y`` r = y # %% # Squared norm/energy of", "residual y_norm_sqr = float(y.T @ y) r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\") # %% #", "check if we found all the actual atoms print(\"Found in I_sub: \", jnp.intersect1d(omega,", "'''''''''''''''''''''''''' y = Phi @ x0 plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(y) #", "module from cr.sparse.pursuit import cosamp # %% # Run the solver solution =", "r # %% # Pick the indices of 2K atoms with largest matches", "# %% # Store the selected K indices in the flags array flags", "the final K indices in ``I`` include all the indices in ``omega`` jnp.setdiff1d(omega,", "with the help of flags array I_sub, = jnp.where(flags) # Since no atoms", "to 3K atoms may be selected (after the merge step). K3 = K", "%% # Check if the final K indices in ``I`` include all the", "@ y) r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\") # %% # A boolean array to", "{max_r_norm_sqr=:.2e}\") # %% # We have completed the signal recovery. We can stop", "# with actual ``x0`` and ``omega``. However, note that in the # real", "residual. * **Merge**: merge these 2K indices with currently selected indices in ``I``", "# %% # Problem Setup # ------------------ # Number of measurements M =", "float(r.T @ r) # It turns out that it is now below the", "* A measurement vector ``y``. * The sparsity level ``K``. The objective of", "print(f\"{I_sub=}\") # %% # Check which indices from ``omega`` are there in ``I_sub``.", "keys for generating random numbers key = random.PRNGKey(0) keys = random.split(key, 4) #", "least square solution and keep them in ``I``. 
* **Update residual**: Compute ``r", "that in the # real implementation of the algorithm, no access to original", "%% # Merge (union) the set of previous K indices with the new", "values from the LS solution x_I = x_sub[Ia] # %% # We now", "%% # Coherence of atoms in the sensing matrix print(crdict.coherence(Phi)) # %% #", "algorithm in action. \"\"\" # %% # Let's import necessary libraries import jax", "residual and verify that it is still larger than the allowed tolerance r_norm_sqr", "Match the current residual with the atoms in ``Phi`` h = Phi.T @", "print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") # %% # Store the selected K indices in the", "Mark the completion of the iteration iterations += 1 # %% # Second", "of ``Phi`` consisting of atoms indexed by ``I_sub`` Phi_sub = Phi[:, flags] #", "%% # At any time, up to 3K atoms may be selected (after", "residual energy and the number of iterations when the algorithm converged. print(solution.r_norm_sqr, solution.iterations)", "# Compute the least squares solution of ``y`` over this subdictionary x_sub, r_sub_norms,", "we found all the actual atoms print(\"Found in I_sub: \", jnp.intersect1d(omega, I_sub)) #", "A boolean array to track the indices selected for least squares steps flags", "# %% # Pick the indices of 2K atoms with largest matches with", "``I`` include all the indices in ``omega`` jnp.setdiff1d(omega, I) # %% # Select", "of iterations completed so far iterations = 0 # %% # A limit", "# %% # We found 6 out of 8 indices in the support.", "Sensing Matching Pursuit) algorithm for sparse recovery. It then shows how to use", "``Phi`` which has been used for data measurements. * A measurement vector ``y``.", "used for data measurements. * A measurement vector ``y``. * The sparsity level", "algorithm for sparse recovery. It then shows how to use the official implementation", "``x0`` and ``omega``. However, note that in the # real implementation of the", "residual ``r`` reduces. 
The algorithm proceeds as follows: * Initialize the solution ``x``", "it indeed found the solution # It is time to compute the residual", "in ``I`` include all the indices in ``omega`` jnp.setdiff1d(omega, I) # %% #", "step development of CoSaMP (Compressive Sensing Matching Pursuit) algorithm for sparse recovery. It", "of ``y`` over this subdictionary x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y) #", "Print the residual energy and the number of iterations when the algorithm converged.", "Here are the remaining. missing = jnp.setdiff1d(omega, I) print(\"Missing indices: \", missing) #", "norm/energy of the residual y_norm_sqr = float(y.T @ y) r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\")", "development of CoSaMP (Compressive Sensing Matching Pursuit) algorithm for sparse recovery. It then", "in ``CR-Sparse``. The CoSaMP algorithm has following inputs: * A sensing matrix or", "in the flags array flags = flags.at[:].set(False) flags = flags.at[I].set(True) print(jnp.where(flags)) # %%", "flags = flags.at[I].set(True) print(jnp.where(flags)) # %% # Mark the completion of the iteration", "%% # Check which indices from ``omega`` are there in ``I_sub``. print(jnp.intersect1d(omega, I_sub))", "I_sub)) # %% # Indeed we did. The set difference is empty. print(\"Missing", "been selected so far, we can be more aggressive # and pick 3K", "by the algorithm steps by comparing the estimates # with actual ``x0`` and", "jnp # Some keys for generating random numbers key = random.PRNGKey(0) keys =", "selected as part of the solution. 
* While the residual energy is above", "K3) # Update the flags array flags = flags.at[I_sub].set(True) # Sort the ``I_sub``", "N, K) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0) # %% # ``omega`` contains", "%% # Import the module from cr.sparse.pursuit import cosamp # %% # Run", "x0 plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(y) # %% # Development of CoSaMP", "iteration:\") # %% # Match the current residual with the atoms in ``Phi``", "jnp.intersect1d(omega, I) print(\"Found indices: \", found) # %% # We found 6 out", "import cr.sparse.dict as crdict import cr.sparse.data as crdata # %% # Problem Setup", "to compute the residual after the first iteration Phi_I = Phi[:, I] r", "is our updated estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100,", "iterating now. iterations += 1 # %% # CR-Sparse official implementation # ----------------------------------------", "random import jax.numpy as jnp # Some keys for generating random numbers key", "``x0`` and ``omega``, we can measure the # progress made by the algorithm", "cr.sparse.dict as crdict import cr.sparse.data as crdata # %% # Problem Setup #", "in I_sub: \", jnp.setdiff1d(omega, I_sub)) # %% # Select the subdictionary of ``Phi``", "plt.plot(x0) # %% # ``omega`` contains the set of indices at which x", "``Ia`` to the actual indices of atoms in ``Phi`` I = I_sub[Ia] print(I)", "atoms selected as part of the solution. 
* While the residual energy is", "random.PRNGKey(0) keys = random.split(key, 4) # For plotting diagrams import matplotlib.pyplot as plt", "Select the subdictionary of ``Phi`` consisting of atoms indexed by I_sub Phi_sub =", "iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") # %% # Match the current residual with", "support of the signal found = jnp.intersect1d(omega, I) print(\"Found indices: \", found) #", "# %% # Number of iterations completed so far iterations = 0 #", "Development of CoSaMP algorithm # --------------------------------- # In the following, we walk through", "The non-zero values on the support x_I = solution.x_I print(x_I) # %% #", "tolerance r_norm_sqr = float(r.T @ r) # It turns out that it is", "of the algorithm successively improves the estimate ``x`` so that the energy of", "can check how good we were in picking the correct indices from the", "and the number of iterations when the algorithm converged. print(solution.r_norm_sqr, solution.iterations) # %%", "no atoms have been selected so far, we can be more aggressive #", "* The sparsity level ``K``. The objective of the algorithm is to estimate", "``Phi x``. A key quantity in the algorithm is the residual ``r =", "of flags array I_sub, = jnp.where(flags) # Since no atoms have been selected", "over this subdictionary x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y) # Pick the", "print(f\"{I_sub=}\") # %% # We can check if we found all the actual", "algorithm is the residual ``r = y - Phi x``. Each iteration of", "# the residual ``r = y - Phi x`` to equal the measurements", "the tolerance threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") # %% # We have completed the", "print(crdict.coherence(Phi)) # %% # A sparse model vector # '''''''''''''''''''''''''' x0, omega =", "is there. 
# # Initialization # '''''''''''''''''''''''''''''''''''''''''''' # %% # We assume the", "y) # Pick the indices of K largest entries in in ``x_sub`` Ia", "(after the merge step). K3 = K + K2 # %% # Number", "the sparse solution I = solution.I print(I) # %% # The non-zero values", "the initial solution to be zero and # the residual ``r = y", "and pick 3K atoms in first iteration. print(f\"{I_sub=}\") # %% # Check which", "``y`` is approximately equal to ``Phi x``. A key quantity in the algorithm", "the residual after the first iteration Phi_I = Phi[:, I] r = y", "**Update residual**: Compute ``r = y - Phi_I x_I``. It is time to", "set of previous K indices with the new 2K indices flags = flags.at[I_2k].set(True)", "indices of 2K atoms with largest matches with the residual I_2k = crs.largest_indices(h,", "array flags = flags.at[:].set(False) flags = flags.at[I].set(True) print(jnp.where(flags)) # %% # Mark the", "reduces. The algorithm proceeds as follows: * Initialize the solution ``x`` with zero.", "indices from ``omega`` are there in ``I_sub``. print(jnp.intersect1d(omega, I_sub)) # %% # Select", "out that it is now below the tolerance threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") #", "above a threshold: * **Match**: Compute the inner product of each atom in", "of the algorithm is to estimate a K-sparse solution ``x`` such that ``y``", "# We assume the initial solution to be zero and # the residual", "Pick the indices of 2K atoms with largest matches with the residual I_2k", "# A boolean array to track the indices selected for least squares steps", "N = 256 # Sparsity level K = 8 # %% # The", "y - Phi_I x_I``. It is time to see the algorithm in action.", "so that the energy of the residual ``r`` reduces. The algorithm proceeds as", "now below the allowed tolerance r_norm_sqr = float(r.T @ r) # It turns", "= jnp.zeros(N, dtype=bool) # %% # During the matching steps, 2K atoms will", "atom in ``Phi`` with the current residual ``r``. 
* **Identify**: Select the indices", "example explains the step by step development of CoSaMP (Compressive Sensing Matching Pursuit)", "jnp.intersect1d(omega, I_sub)) # %% # Indeed we did. The set difference is empty.", "in the # real implementation of the algorithm, no access to original model", "at which x is nonzero (support of ``x``) print(omega) # %% # Compressive", "the second iteration Phi_I = Phi[:, I] r = y - Phi_I @", "========================== This example explains the step by step development of CoSaMP (Compressive Sensing", "%% # Run the solver solution = cosamp.matrix_solve_jit(Phi, y, K) # The support", "y_norm_sqr = float(y.T @ y) r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\") # %% # A", "# Sparsity level K = 8 # %% # The Sparsifying Basis #", "found = jnp.intersect1d(omega, I) print(\"Found indices: \", found) # %% # We found", "current residual with the atoms in ``Phi`` h = Phi.T @ r #", "# We can check if we found all the actual atoms print(\"Found in", "in ``Phi`` I = I_sub[Ia] print(f\"{I=}\") # %% # Select the corresponding values", "%% # We found 6 out of 8 indices in the support. Here", "found the solution # It is time to compute the residual after the", "check if these include the atoms missed out in first iteration. print(jnp.intersect1d(omega, I_2k))", "= Phi[:, I] r = y - Phi_I @ x_I # %% #", "is time to compute the residual after the first iteration Phi_I = Phi[:,", "first iteration Phi_I = Phi[:, I] r = y - Phi_I @ x_I", "CR-Sparse official implementation # ---------------------------------------- # The JIT compiled version of this algorithm", "in the support. Here are the remaining. missing = jnp.setdiff1d(omega, I) print(\"Missing indices:", "%% # Compressive measurements # '''''''''''''''''''''''''' y = Phi @ x0 plt.figure(figsize=(8,6), dpi=", "I_sub[Ia] print(f\"{I=}\") # %% # Select the corresponding values from the LS solution", "%% # We have completed the signal recovery. 
We can stop iterating now.", "threshold: * **Match**: Compute the inner product of each atom in ``Phi`` with", "``omega`` jnp.setdiff1d(omega, I) # %% # Select the corresponding values from the LS", "Pick the largest K entries from this least square solution and keep them", "# '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") # %% # Match the current residual with the", "we were in picking the correct indices from the actual support of the", "plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # The algorithm has no direct", "the corresponding values from the LS solution x_I = x_sub[Ia] # %% #", "time to compute the residual after the second iteration Phi_I = Phi[:, I]", "were in picking the correct indices from the actual support of the signal", "indices from the actual support of the signal found = jnp.intersect1d(omega, I) print(\"Found", "**Merge**: merge these 2K indices with currently selected indices in ``I`` to form", "K3) # We can check if these include the atoms missed out in", "flags.at[I].set(True) print(jnp.where(flags)) # %% # Mark the completion of the iteration iterations +=", "the algorithm is the residual ``r = y - Phi x``. Each iteration", "# %% # A boolean array to track the indices selected for least", "if we found all the actual atoms print(\"Found in I_sub: \", jnp.intersect1d(omega, I_sub))", "K2 # %% # Number of iterations completed so far iterations = 0", "the completion of the iteration iterations += 1 # %% # Second iteration", "Select the subdictionary of ``Phi`` consisting of atoms indexed by ``I_sub`` Phi_sub =", "the actual support of the signal found = jnp.intersect1d(omega, I) print(\"Found indices: \",", "indices flags = flags.at[I_2k].set(True) I_sub, = jnp.where(flags) print(f\"{I_sub=}\") # %% # We can", "The set difference is empty. print(\"Missing in I_sub: \", jnp.setdiff1d(omega, I_sub)) # %%", "in # ``cr.sparse.pursuit.cosamp`` module. 
# %% # Import the module from cr.sparse.pursuit import", "squares steps flags = jnp.zeros(N, dtype=bool) # %% # During the matching steps,", "dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() #", "K = 8 # %% # The Sparsifying Basis # '''''''''''''''''''''''''' Phi =", "%% # Select the corresponding values from the LS solution x_I = x_sub[Ia]", "edgecolor='k') plt.plot(x0) # %% # ``omega`` contains the set of indices at which", "# %% # Compute the least squares solution of ``y`` over this subdictionary", "= Phi[:, flags] # %% # Compute the least squares solution of ``y``", "the merge step). K3 = K + K2 # %% # Number of", "print(jnp.setdiff1d(omega, I)) # %% # Print the residual energy and the number of", "sparse model vector # '''''''''''''''''''''''''' x0, omega = crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6), dpi=", "* **Identify**: Select the indices of 2K atoms from ``Phi`` with the largest", "# We have completed the signal recovery. We can stop iterating now. iterations", "of CoSaMP algorithm. # Since we have access to ``x0`` and ``omega``, we", "# %% # We have completed the signal recovery. 
We can stop iterating", "%% # We assume the initial solution to be zero and # the", "solution of ``Phi[:, I_sub] z = y`` * **Prune**: Pick the largest K", "indices of atoms in ``Phi`` I = I_sub[Ia] print(I) # %% # Check", "solution to be zero and # the residual ``r = y - Phi", "+ K2 # %% # Number of iterations completed so far iterations =", "M = 128 # Ambient dimension N = 256 # Sparsity level K", "actual support of the signal found = jnp.intersect1d(omega, I) print(\"Found indices: \", found)", "This example explains the step by step development of CoSaMP (Compressive Sensing Matching", "# It turns out that it is now below the tolerance threshold print(f\"{r_norm_sqr=:.2e}", "iterations = 0 # %% # A limit on the maximum tolerance for", "least squares solution of ``Phi[:, I_sub] z = y`` * **Prune**: Pick the", "indices in the flags array flags = flags.at[:].set(False) flags = flags.at[I].set(True) print(jnp.where(flags)) #", "K + K2 # %% # Number of iterations completed so far iterations", "tolerance for residual norm res_norm_rtol = 1e-3 max_r_norm_sqr = y_norm_sqr * (res_norm_rtol **", "second iteration Phi_I = Phi[:, I] r = y - Phi_I @ x_I", "the signal recovery. We can stop iterating now. 
iterations += 1 # %%", "crs.largest_indices(x_sub, K) print(Ia) # %% # We need to map the indices in", "residual norm res_norm_rtol = 1e-3 max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\")", "= random.PRNGKey(0) keys = random.split(key, 4) # For plotting diagrams import matplotlib.pyplot as", "``Phi`` I = I_sub[Ia] print(f\"{I=}\") # %% # Select the corresponding values from", "r = y - Phi_I @ x_I # %% # Compute the residual", "the signal found = jnp.intersect1d(omega, I) print(\"Found indices: \", found) # %% #", "the algorithm is to estimate a K-sparse solution ``x`` such that ``y`` is", "A sparse model vector # '''''''''''''''''''''''''' x0, omega = crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6),", "= K + K2 # %% # Number of iterations completed so far", "for the sparse solution I = solution.I print(I) # %% # The non-zero", "cr.sparse.pursuit import cosamp # %% # Run the solver solution = cosamp.matrix_solve_jit(Phi, y,", "largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(f\"{Ia=}\") # %% #", "the largest K entries from this least square solution and keep them in", "key quantity in the algorithm is the residual ``r = y - Phi", "<reponame>carnot-shailesh/cr-sparse \"\"\" CoSaMP step by step ========================== This example explains the step by", "# %% # Compute the residual and verify that it is still larger", "We can check how good we were in picking the correct indices from", "in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(f\"{Ia=}\") # %% # We need to", "jnp.setdiff1d(omega, I) # %% # Select the corresponding values from the LS solution", "to the actual indices of atoms in ``Phi`` I = I_sub[Ia] print(f\"{I=}\") #", "= I_sub[Ia] print(f\"{I=}\") # %% # Select the corresponding values from the LS", "access to original model # vector is there. 
# # Initialization # ''''''''''''''''''''''''''''''''''''''''''''", "solution of ``y`` over this subdictionary x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y)", "knowing that it indeed found the solution # It is time to compute", "``Phi[:, I_sub] z = y`` * **Prune**: Pick the largest K entries from", "I_sub] z = y`` * **Prune**: Pick the largest K entries from this", "algorithm has following inputs: * A sensing matrix or dictionary ``Phi`` which has", "# Print the residual energy and the number of iterations when the algorithm", "steps flags = jnp.zeros(N, dtype=bool) # %% # During the matching steps, 2K", "x is nonzero (support of ``x``) print(omega) # %% # Compressive measurements #", "picked. K2 = 2*K # %% # At any time, up to 3K", "= jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated", "which has been used for data measurements. * A measurement vector ``y``. *", "omega = crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0) # %%", "Phi = crdict.gaussian_mtx(key, M,N) print(Phi.shape) # %% # Coherence of atoms in the", "largest K entries from this least square solution and keep them in ``I``.", "= flags.at[I_2k].set(True) I_sub, = jnp.where(flags) print(f\"{I_sub=}\") # %% # We can check if", "array I_sub, = jnp.where(flags) # Since no atoms have been selected so far,", "Since we have access to ``x0`` and ``omega``, we can measure the #", "edgecolor='k') plt.plot(y) # %% # Development of CoSaMP algorithm # --------------------------------- # In", "cr.sparse as crs import cr.sparse.dict as crdict import cr.sparse.data as crdata # %%", "be picked. 
K2 = 2*K # %% # At any time, up to", "solution x_I = x_sub[Ia] # %% # Here is our updated estimate of", "@ r # %% # Pick the indices of 3K atoms with largest", "# Mark the completion of the iteration iterations += 1 # %% #", "track the indices selected for least squares steps flags = jnp.zeros(N, dtype=bool) #", "rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y) # Pick the indices of K largest entries", "= y`` * **Prune**: Pick the largest K entries from this least square", "set of indices at which x is nonzero (support of ``x``) print(omega) #", "@ r) # It turns out that it is now below the tolerance", "first estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')", "the estimate ``x`` so that the energy of the residual ``r`` reduces. The", "as part of the solution. * While the residual energy is above a", "allowed tolerance r_norm_sqr = float(r.T @ r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") # %% #", "the LS solution x_I = x_sub[Ia] # %% # Here is our updated", "the inner product of each atom in ``Phi`` with the current residual ``r``.", "100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %%", "version of this algorithm is available in # ``cr.sparse.pursuit.cosamp`` module. # %% #", "# Second iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") # %% # Match the current", "# ---------------------------------------- # The JIT compiled version of this algorithm is available in", "algorithm, no access to original model # vector is there. 
# # Initialization", "help of flags array I_sub, = jnp.where(flags) # Since no atoms have been", "consisting of atoms indexed by ``I_sub`` Phi_sub = Phi[:, flags] # %% #", "jax import random import jax.numpy as jnp # Some keys for generating random", "progress made by the algorithm steps by comparing the estimates # with actual", "# # Initialization # '''''''''''''''''''''''''''''''''''''''''''' # %% # We assume the initial solution", "the residual energy is above a threshold: * **Match**: Compute the inner product", "recovery. We can stop iterating now. iterations += 1 # %% # CR-Sparse", "atoms missed out in first iteration. print(jnp.intersect1d(omega, I_2k)) # %% # Merge (union)", "It is time to see the algorithm in action. \"\"\" # %% #", "index set ``I`` (initially empty) of atoms selected as part of the solution.", "# '''''''''''''''''''''''''''''''''''''''''''' # %% # We assume the initial solution to be zero", "# %% # Match the current residual with the atoms in ``Phi`` h", "to original model # vector is there. # # Initialization # '''''''''''''''''''''''''''''''''''''''''''' #", "print(\"First iteration:\") # %% # Match the current residual with the atoms in", "* Maintain an index set ``I`` (initially empty) of atoms selected as part", "largest matches with the residual I_sub = crs.largest_indices(h, K3) # Update the flags", "as crs import cr.sparse.dict as crdict import cr.sparse.data as crdata # %% #", "the actual atoms print(\"Found in I_sub: \", jnp.intersect1d(omega, I_sub)) # %% # Indeed", "of K largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(Ia) #", "``Phi`` with the current residual ``r``. 
* **Identify**: Select the indices of 2K", "to compute the residual after the second iteration Phi_I = Phi[:, I] r", "%% # Compute the residual and verify that it is now below the", "subdictionary of ``Phi`` consisting of atoms indexed by ``I_sub`` Phi_sub = Phi[:, flags]", "of K largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(f\"{Ia=}\") #", "support for the sparse solution I = solution.I print(I) # %% # The", "verify that it is still larger than the allowed tolerance r_norm_sqr = float(r.T", "made by the algorithm steps by comparing the estimates # with actual ``x0``", "equal to ``Phi x``. A key quantity in the algorithm is the residual", "JIT compiled version of this algorithm is available in # ``cr.sparse.pursuit.cosamp`` module. #", "# %% # Let's plot the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100,", "iteration. print(jnp.intersect1d(omega, I_2k)) # %% # Merge (union) the set of previous K", "time to see the algorithm in action. \"\"\" # %% # Let's import", "%% # We can check if we found all the actual atoms print(\"Found", "solution\") plt.legend() # %% # We can check how good we were in", "# Select the subdictionary of ``Phi`` consisting of atoms indexed by ``I_sub`` Phi_sub", "import jax.numpy as jnp # Some keys for generating random numbers key =", "# A limit on the maximum tolerance for residual norm res_norm_rtol = 1e-3", "we did. The set difference is empty. 
print(\"Missing in I_sub: \", jnp.setdiff1d(omega, I_sub))", "subdictionary of ``Phi`` consisting of atoms indexed by I_sub Phi_sub = Phi[:, flags]", "of 3K atoms with largest matches with the residual I_sub = crs.largest_indices(h, K3)", "with the new 2K indices flags = flags.at[I_2k].set(True) I_sub, = jnp.where(flags) print(f\"{I_sub=}\") #", "# Update the flags array flags = flags.at[I_sub].set(True) # Sort the ``I_sub`` array", "residual I_2k = crs.largest_indices(h, K2 if iterations else K3) # We can check", "following, we walk through the steps of CoSaMP algorithm. # Since we have", "Compute the residual and verify that it is still larger than the allowed", "# For plotting diagrams import matplotlib.pyplot as plt # CR-Sparse modules import cr.sparse", "Problem Setup # ------------------ # Number of measurements M = 128 # Ambient", "# %% # Mark the completion of the iteration iterations += 1 #", "plot the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original", "indices in ``I`` include all the indices in ``omega`` jnp.setdiff1d(omega, I) # %%", "'''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key, M,N) print(Phi.shape) # %% # Coherence of atoms in", "completed the signal recovery. We can stop iterating now. iterations += 1 #", "approximately equal to ``Phi x``. A key quantity in the algorithm is the", "way of knowing that it indeed found the solution # It is time", "label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # The algorithm has", "plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # We can check how good", "1 # %% # CR-Sparse official implementation # ---------------------------------------- # The JIT compiled", "with actual ``x0`` and ``omega``. 
However, note that in the # real implementation", "on the support x_I = solution.x_I print(x_I) # %% # Verify that we", "no access to original model # vector is there. # # Initialization #", "Coherence of atoms in the sensing matrix print(crdict.coherence(Phi)) # %% # A sparse", "the # real implementation of the algorithm, no access to original model #", "found) # %% # We found 6 out of 8 indices in the", "I_sub[Ia] print(I) # %% # Check if the final K indices in ``I``", "max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\") # %% # First iteration", "the new 2K indices flags = flags.at[I_2k].set(True) I_sub, = jnp.where(flags) print(f\"{I_sub=}\") # %%", "First iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") # %% # Match the current residual", "edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # The", "did. The set difference is empty. print(\"Missing in I_sub: \", jnp.setdiff1d(omega, I_sub)) #", "the steps of CoSaMP algorithm. # Since we have access to ``x0`` and", "flags = flags.at[I_2k].set(True) I_sub, = jnp.where(flags) print(f\"{I_sub=}\") # %% # We can check", "to be zero and # the residual ``r = y - Phi x``", "``Phi`` with the largest correlation with the residual. * **Merge**: merge these 2K", "# %% # Indeed we did. The set difference is empty. print(\"Missing in", "is time to compute the residual after the second iteration Phi_I = Phi[:,", "= Phi @ x0 plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(y) # %% #", "is empty. print(\"Missing in I_sub: \", jnp.setdiff1d(omega, I_sub)) # %% # Select the", "res_norm_rtol = 1e-3 max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\") # %%", "``r = y - Phi x``. 
Each iteration of the algorithm successively improves", "import jax from jax import random import jax.numpy as jnp # Some keys", "the support print(jnp.setdiff1d(omega, I)) # %% # Print the residual energy and the", "%% # Verify that we successfully recovered the support print(jnp.setdiff1d(omega, I)) # %%", "difference is empty. print(\"Missing in I_sub: \", jnp.setdiff1d(omega, I_sub)) # %% # Select", "3K atoms with largest matches with the residual I_sub = crs.largest_indices(h, K3) #", "# '''''''''''''''''''''''''' y = Phi @ x0 plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(y)", "* While the residual energy is above a threshold: * **Match**: Compute the", "solution I = solution.I print(I) # %% # The non-zero values on the", "first iteration. print(f\"{I_sub=}\") # %% # Check which indices from ``omega`` are there", "# %% # Compute the residual and verify that it is now below", "first iteration. print(jnp.intersect1d(omega, I_2k)) # %% # Merge (union) the set of previous", "of 8 indices in the support. Here are the remaining. missing = jnp.setdiff1d(omega,", "indices of 2K atoms from ``Phi`` with the largest correlation with the residual.", "jnp.setdiff1d(omega, I_sub)) # %% # Select the subdictionary of ``Phi`` consisting of atoms", "for residual norm res_norm_rtol = 1e-3 max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2)", "which indices from ``omega`` are there in ``I_sub``. 
print(jnp.intersect1d(omega, I_sub)) # %% #", "and # the residual ``r = y - Phi x`` to equal the", "map the indices in ``Ia`` to the actual indices of atoms in ``Phi``", "is still larger than the allowed tolerance r_norm_sqr = float(r.T @ r) print(f\"{r_norm_sqr=:.2e}", "the first iteration Phi_I = Phi[:, I] r = y - Phi_I @", "allowed tolerance r_norm_sqr = float(r.T @ r) # It turns out that it", "contains the set of indices at which x is nonzero (support of ``x``)", "to ``x0`` and ``omega``, we can measure the # progress made by the", "# %% # We now have our first estimate of the solution x", "part of the solution. * While the residual energy is above a threshold:", "jax from jax import random import jax.numpy as jnp # Some keys for", "Phi_I x_I``. It is time to see the algorithm in action. \"\"\" #", "sparsity level ``K``. The objective of the algorithm is to estimate a K-sparse", "it is now below the allowed tolerance r_norm_sqr = float(r.T @ r) #", "indices in ``Ia`` to the actual indices of atoms in ``Phi`` I =", "# %% # Import the module from cr.sparse.pursuit import cosamp # %% #", "Compute the residual and verify that it is now below the allowed tolerance", "Phi_I = Phi[:, I] r = y - Phi_I @ x_I # %%", "%% # Store the selected K indices in the flags array flags =", "in ``omega`` jnp.setdiff1d(omega, I) # %% # Select the corresponding values from the", "or dictionary ``Phi`` which has been used for data measurements. * A measurement", "atoms will be picked. K2 = 2*K # %% # At any time,", "random numbers key = random.PRNGKey(0) keys = random.split(key, 4) # For plotting diagrams", "- Phi_I @ x_I # %% # Compute the residual and verify that", "shows how to use the official implementation of CoSaMP in ``CR-Sparse``. The CoSaMP", "residual**: Compute ``r = y - Phi_I x_I``. It is time to see", "Update the flags array flags = flags.at[I_sub].set(True) # Sort the ``I_sub`` array with", "are the remaining. 
missing = jnp.setdiff1d(omega, I) print(\"Missing indices: \", missing) # %%", "'''''''''''''''''''''''''' x0, omega = crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0)", "below the allowed tolerance r_norm_sqr = float(r.T @ r) # It turns out", "{max_r_norm_sqr=:.2e}\") # %% # Store the selected K indices in the flags array", "``K``. The objective of the algorithm is to estimate a K-sparse solution ``x``", "has no direct way of knowing that it indeed found the solution #", "facecolor='w', edgecolor='k') plt.plot(y) # %% # Development of CoSaMP algorithm # --------------------------------- #", "K indices in the flags array flags = flags.at[:].set(False) flags = flags.at[I].set(True) print(jnp.where(flags))", "I = I_sub[Ia] print(f\"{I=}\") # %% # Select the corresponding values from the", "correct indices from the actual support of the signal found = jnp.intersect1d(omega, I)", "diagrams import matplotlib.pyplot as plt # CR-Sparse modules import cr.sparse as crs import", "the allowed tolerance r_norm_sqr = float(r.T @ r) # It turns out that", "Compute the least squares solution of ``y`` over this subdictionary x_sub, r_sub_norms, rank_sub,", "print(f\"{Ia=}\") # %% # We need to map the indices in ``Ia`` to", "it is now below the tolerance threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") # %% #", "``cr.sparse.pursuit.cosamp`` module. 
# %% # Import the module from cr.sparse.pursuit import cosamp #", "Since no atoms have been selected so far, we can be more aggressive", "# It is time to compute the residual after the first iteration Phi_I", "``Phi`` h = Phi.T @ r # %% # Pick the indices of", "I) print(\"Found indices: \", found) # %% # We found 6 out of", "Merge (union) the set of previous K indices with the new 2K indices", "%% # A boolean array to track the indices selected for least squares", "objective of the algorithm is to estimate a K-sparse solution ``x`` such that", "= y_norm_sqr print(f\"{r_norm_sqr=}\") # %% # A boolean array to track the indices", "K3 = K + K2 # %% # Number of iterations completed so", "For plotting diagrams import matplotlib.pyplot as plt # CR-Sparse modules import cr.sparse as", "print(Ia) # %% # We need to map the indices in ``Ia`` to", "%% # Let's plot the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w',", "It is time to compute the residual after the first iteration Phi_I =", "Sparsity level K = 8 # %% # The Sparsifying Basis # ''''''''''''''''''''''''''", "%% # Compute the least squares solution of ``y`` over this subdictionary x_sub,", "to form ``I_sub``. * **LS**: Compute the least squares solution of ``Phi[:, I_sub]", "# CR-Sparse official implementation # ---------------------------------------- # The JIT compiled version of this", "flags.at[I_sub].set(True) # Sort the ``I_sub`` array with the help of flags array I_sub,", "atoms in ``Phi`` h = Phi.T @ r # %% # Pick the", "Phi.T @ r # %% # Pick the indices of 3K atoms with", "the correct indices from the actual support of the signal found = jnp.intersect1d(omega,", "is approximately equal to ``Phi x``. 
A key quantity in the algorithm is", "%% # Second iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") # %% # Match the", "= float(y.T @ y) r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\") # %% # A boolean", "Let's plot the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0,", "jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\")", "we walk through the steps of CoSaMP algorithm. # Since we have access", "in the sensing matrix print(crdict.coherence(Phi)) # %% # A sparse model vector #", "we successfully recovered the support print(jnp.setdiff1d(omega, I)) # %% # Print the residual", "%% # During the matching steps, 2K atoms will be picked. K2 =", "# --------------------------------- # In the following, we walk through the steps of CoSaMP", "(Compressive Sensing Matching Pursuit) algorithm for sparse recovery. It then shows how to", "``r = y - Phi x`` to equal the measurements ``y`` r =", "measurements ``y`` r = y # %% # Squared norm/energy of the residual", "= 8 # %% # The Sparsifying Basis # '''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key,", "'''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") # %% # Match the current residual with the atoms", "Store the selected K indices in the flags array flags = flags.at[:].set(False) flags", "= solution.I print(I) # %% # The non-zero values on the support x_I", "atoms print(\"Found in I_sub: \", jnp.intersect1d(omega, I_sub)) # %% # Indeed we did.", "of the signal found = jnp.intersect1d(omega, I) print(\"Found indices: \", found) # %%", "x_I``. It is time to see the algorithm in action. 
\"\"\" # %%", "edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # We", "= cosamp.matrix_solve_jit(Phi, y, K) # The support for the sparse solution I =", "# ``omega`` contains the set of indices at which x is nonzero (support", "plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(y) # %% # Development of CoSaMP algorithm", "I = solution.I print(I) # %% # The non-zero values on the support", "merge these 2K indices with currently selected indices in ``I`` to form ``I_sub``.", "y - Phi x`` to equal the measurements ``y`` r = y #", "the algorithm converged. print(solution.r_norm_sqr, solution.iterations) # %% # Let's plot the solution x", "solution and keep them in ``I``. * **Update residual**: Compute ``r = y", "measurements. * A measurement vector ``y``. * The sparsity level ``K``. The objective", "all the actual atoms print(\"Found in I_sub: \", jnp.intersect1d(omega, I_sub)) # %% #", "A measurement vector ``y``. * The sparsity level ``K``. The objective of the", "on the maximum tolerance for residual norm res_norm_rtol = 1e-3 max_r_norm_sqr = y_norm_sqr", "of atoms in ``Phi`` I = I_sub[Ia] print(f\"{I=}\") # %% # Select the", "2K atoms will be picked. K2 = 2*K # %% # At any", "form ``I_sub``. * **LS**: Compute the least squares solution of ``Phi[:, I_sub] z", "crs.largest_indices(h, K3) # Update the flags array flags = flags.at[I_sub].set(True) # Sort the", "modules import cr.sparse as crs import cr.sparse.dict as crdict import cr.sparse.data as crdata", "residual after the first iteration Phi_I = Phi[:, I] r = y -", "far, we can be more aggressive # and pick 3K atoms in first", "# Indeed we did. The set difference is empty. print(\"Missing in I_sub: \",", "selected (after the merge step). 
K3 = K + K2 # %% #", "``Phi`` consisting of atoms indexed by I_sub Phi_sub = Phi[:, flags] # %%", "CoSaMP algorithm # --------------------------------- # In the following, we walk through the steps", "solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x,", "# Number of measurements M = 128 # Ambient dimension N = 256", "of ``Phi[:, I_sub] z = y`` * **Prune**: Pick the largest K entries", "our first estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w',", "# The support for the sparse solution I = solution.I print(I) # %%", "official implementation of CoSaMP in ``CR-Sparse``. The CoSaMP algorithm has following inputs: *", "as crdata # %% # Problem Setup # ------------------ # Number of measurements", "the set of previous K indices with the new 2K indices flags =", "= jnp.linalg.lstsq(Phi_sub, y) # Pick the indices of K largest entries in in", "# Match the current residual with the atoms in ``Phi`` h = Phi.T", "as plt # CR-Sparse modules import cr.sparse as crs import cr.sparse.dict as crdict", "the following, we walk through the steps of CoSaMP algorithm. # Since we", "%% # First iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") # %% # Match the", "number of iterations when the algorithm converged. print(solution.r_norm_sqr, solution.iterations) # %% # Let's", "During the matching steps, 2K atoms will be picked. K2 = 2*K #", "proceeds as follows: * Initialize the solution ``x`` with zero. 
* Maintain an", "# We need to map the indices in ``Ia`` to the actual indices", "can check if we found all the actual atoms print(\"Found in I_sub: \",", "We need to map the indices in ``Ia`` to the actual indices of", "Pick the indices of K largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub,", "which x is nonzero (support of ``x``) print(omega) # %% # Compressive measurements", "is the residual ``r = y - Phi x``. Each iteration of the", "inner product of each atom in ``Phi`` with the current residual ``r``. *", "+= 1 # %% # CR-Sparse official implementation # ---------------------------------------- # The JIT", "= solution.x_I print(x_I) # %% # Verify that we successfully recovered the support", "correlation with the residual. * **Merge**: merge these 2K indices with currently selected", "float(r.T @ r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") # %% # Store the selected K", "now below the tolerance threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") # %% # We have", "``x`` with zero. * Maintain an index set ``I`` (initially empty) of atoms", "%% # A sparse model vector # '''''''''''''''''''''''''' x0, omega = crdata.sparse_normal_representations(key, N,", "with largest matches with the residual I_2k = crs.largest_indices(h, K2 if iterations else", "of CoSaMP algorithm # --------------------------------- # In the following, we walk through the", "Verify that we successfully recovered the support print(jnp.setdiff1d(omega, I)) # %% # Print", "if iterations else K3) # We can check if these include the atoms", "access to ``x0`` and ``omega``, we can measure the # progress made by", "print(\"Found indices: \", found) # %% # We found 6 out of 8", "to track the indices selected for least squares steps flags = jnp.zeros(N, dtype=bool)", "We found 6 out of 8 indices in the support. Here are the", "--------------------------------- # In the following, we walk through the steps of CoSaMP algorithm.", "level ``K``. 
The objective of the algorithm is to estimate a K-sparse solution", "= y - Phi_I @ x_I # %% # Compute the residual and", "these 2K indices with currently selected indices in ``I`` to form ``I_sub``. *", "is nonzero (support of ``x``) print(omega) # %% # Compressive measurements # ''''''''''''''''''''''''''", "jnp.where(flags) # Since no atoms have been selected so far, we can be", "h = Phi.T @ r # %% # Pick the indices of 3K", "# %% # Coherence of atoms in the sensing matrix print(crdict.coherence(Phi)) # %%", "be zero and # the residual ``r = y - Phi x`` to", "* **Match**: Compute the inner product of each atom in ``Phi`` with the", "maximum tolerance for residual norm res_norm_rtol = 1e-3 max_r_norm_sqr = y_norm_sqr * (res_norm_rtol", "aggressive # and pick 3K atoms in first iteration. print(f\"{I_sub=}\") # %% #", "LS solution x_I = x_sub[Ia] # %% # Here is our updated estimate", "norm res_norm_rtol = 1e-3 max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\") #", "the official implementation of CoSaMP in ``CR-Sparse``. The CoSaMP algorithm has following inputs:", "# ------------------ # Number of measurements M = 128 # Ambient dimension N", "note that in the # real implementation of the algorithm, no access to", "values from the LS solution x_I = x_sub[Ia] # %% # Here is", "# Problem Setup # ------------------ # Number of measurements M = 128 #", "compute the residual after the first iteration Phi_I = Phi[:, I] r =", "the residual ``r`` reduces. The algorithm proceeds as follows: * Initialize the solution", "print(\"Missing indices: \", missing) # %% # It is time to compute the", "# %% # Development of CoSaMP algorithm # --------------------------------- # In the following,", "use the official implementation of CoSaMP in ``CR-Sparse``. The CoSaMP algorithm has following", "support. Here are the remaining. 
missing = jnp.setdiff1d(omega, I) print(\"Missing indices: \", missing)", "= jnp.intersect1d(omega, I) print(\"Found indices: \", found) # %% # We found 6", "x``. A key quantity in the algorithm is the residual ``r = y", "``r = y - Phi_I x_I``. It is time to see the algorithm", "from the actual support of the signal found = jnp.intersect1d(omega, I) print(\"Found indices:", "crdict import cr.sparse.data as crdata # %% # Problem Setup # ------------------ #", "as follows: * Initialize the solution ``x`` with zero. * Maintain an index", "indices of atoms in ``Phi`` I = I_sub[Ia] print(f\"{I=}\") # %% # Select", "with the residual I_2k = crs.largest_indices(h, K2 if iterations else K3) # We", "solution. * While the residual energy is above a threshold: * **Match**: Compute", "``omega`` are there in ``I_sub``. print(jnp.intersect1d(omega, I_sub)) # %% # Select the subdictionary", "of ``Phi`` consisting of atoms indexed by I_sub Phi_sub = Phi[:, flags] #", "# %% # At any time, up to 3K atoms may be selected", "generating random numbers key = random.PRNGKey(0) keys = random.split(key, 4) # For plotting", "the algorithm successively improves the estimate ``x`` so that the energy of the", "product of each atom in ``Phi`` with the current residual ``r``. * **Identify**:", "print(solution.r_norm_sqr, solution.iterations) # %% # Let's plot the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6),", "\"\"\" CoSaMP step by step ========================== This example explains the step by step", "# Select the corresponding values from the LS solution x_I = x_sub[Ia] #", "solution.x_I print(x_I) # %% # Verify that we successfully recovered the support print(jnp.setdiff1d(omega,", "# %% # We can check how good we were in picking the", "the indices of 2K atoms from ``Phi`` with the largest correlation with the", "the solution ``x`` with zero. 
* Maintain an index set ``I`` (initially empty)", "= Phi.T @ r # %% # Pick the indices of 3K atoms", "more aggressive # and pick 3K atoms in first iteration. print(f\"{I_sub=}\") # %%", "through the steps of CoSaMP algorithm. # Since we have access to ``x0``", "CoSaMP algorithm has following inputs: * A sensing matrix or dictionary ``Phi`` which", "Select the corresponding values from the LS solution x_I = x_sub[Ia] # %%", "x_I = solution.x_I print(x_I) # %% # Verify that we successfully recovered the", "indices of K largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(f\"{Ia=}\")", "The support for the sparse solution I = solution.I print(I) # %% #", "of previous K indices with the new 2K indices flags = flags.at[I_2k].set(True) I_sub,", "of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original", "Pursuit) algorithm for sparse recovery. It then shows how to use the official", "step by step development of CoSaMP (Compressive Sensing Matching Pursuit) algorithm for sparse", "# %% # We assume the initial solution to be zero and #", "of atoms indexed by I_sub Phi_sub = Phi[:, flags] # %% # Compute", "the atoms in ``Phi`` h = Phi.T @ r # %% # Pick", "the indices in ``Ia`` to the actual indices of atoms in ``Phi`` I", "flags = flags.at[I_sub].set(True) # Sort the ``I_sub`` array with the help of flags", "label=\"Estimated solution\") plt.legend() # %% # We can check how good we were", "previous K indices with the new 2K indices flags = flags.at[I_2k].set(True) I_sub, =", "in action. \"\"\" # %% # Let's import necessary libraries import jax from", "LS solution x_I = x_sub[Ia] # %% # We now have our first", "iterations when the algorithm converged. print(solution.r_norm_sqr, solution.iterations) # %% # Let's plot the", "Phi x``. Each iteration of the algorithm successively improves the estimate ``x`` so", "in the algorithm is the residual ``r = y - Phi x``. 
Each", "matching steps, 2K atoms will be picked. K2 = 2*K # %% #", "Maintain an index set ``I`` (initially empty) of atoms selected as part of", "have been selected so far, we can be more aggressive # and pick", "we can be more aggressive # and pick 3K atoms in first iteration.", "cr.sparse.data as crdata # %% # Problem Setup # ------------------ # Number of", "how good we were in picking the correct indices from the actual support", "least squares steps flags = jnp.zeros(N, dtype=bool) # %% # During the matching", "%% # Problem Setup # ------------------ # Number of measurements M = 128", "if the final K indices in ``I`` include all the indices in ``omega``", "# Let's plot the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')", "of atoms indexed by ``I_sub`` Phi_sub = Phi[:, flags] # %% # Compute", "# %% # Merge (union) the set of previous K indices with the", "In the following, we walk through the steps of CoSaMP algorithm. # Since", "implementation # ---------------------------------------- # The JIT compiled version of this algorithm is available", "no direct way of knowing that it indeed found the solution # It", "jnp.where(flags) print(f\"{I_sub=}\") # %% # We can check if we found all the", "Compute ``r = y - Phi_I x_I``. It is time to see the", "the residual y_norm_sqr = float(y.T @ y) r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\") # %%", "print(I) # %% # Check if the final K indices in ``I`` include", "K) print(Ia) # %% # We need to map the indices in ``Ia``", "have completed the signal recovery. We can stop iterating now. 
iterations += 1", "algorithm is to estimate a K-sparse solution ``x`` such that ``y`` is approximately", "label=\"Estimated solution\") plt.legend() # %% # The algorithm has no direct way of", "we have access to ``x0`` and ``omega``, we can measure the # progress", "# %% # A sparse model vector # '''''''''''''''''''''''''' x0, omega = crdata.sparse_normal_representations(key,", "official implementation # ---------------------------------------- # The JIT compiled version of this algorithm is", "real implementation of the algorithm, no access to original model # vector is", "%% # The Sparsifying Basis # '''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key, M,N) print(Phi.shape) #", "with the residual I_sub = crs.largest_indices(h, K3) # Update the flags array flags", "Each iteration of the algorithm successively improves the estimate ``x`` so that the", "I] r = y - Phi_I @ x_I # %% # Compute the", "dimension N = 256 # Sparsity level K = 8 # %% #", "z = y`` * **Prune**: Pick the largest K entries from this least", "= I_sub[Ia] print(I) # %% # Check if the final K indices in", "solution = cosamp.matrix_solve_jit(Phi, y, K) # The support for the sparse solution I", "of ``x``) print(omega) # %% # Compressive measurements # '''''''''''''''''''''''''' y = Phi", "of the algorithm, no access to original model # vector is there. #", "I_sub: \", jnp.setdiff1d(omega, I_sub)) # %% # Select the subdictionary of ``Phi`` consisting", "CoSaMP (Compressive Sensing Matching Pursuit) algorithm for sparse recovery. It then shows how", "array with the help of flags array I_sub, = jnp.where(flags) # Since no", "measure the # progress made by the algorithm steps by comparing the estimates", "then shows how to use the official implementation of CoSaMP in ``CR-Sparse``. The", "the number of iterations when the algorithm converged. 
print(solution.r_norm_sqr, solution.iterations) # %% #", "x_I = x_sub[Ia] # %% # Here is our updated estimate of the", "# We can check how good we were in picking the correct indices", "``y``. * The sparsity level ``K``. The objective of the algorithm is to", "**Match**: Compute the inner product of each atom in ``Phi`` with the current", "facecolor='w', edgecolor='k') plt.plot(x0) # %% # ``omega`` contains the set of indices at", "jnp.zeros(N, dtype=bool) # %% # During the matching steps, 2K atoms will be", "random.split(key, 4) # For plotting diagrams import matplotlib.pyplot as plt # CR-Sparse modules", "and ``omega``. However, note that in the # real implementation of the algorithm,", "if these include the atoms missed out in first iteration. print(jnp.intersect1d(omega, I_2k)) #", "The sparsity level ``K``. The objective of the algorithm is to estimate a", "Run the solver solution = cosamp.matrix_solve_jit(Phi, y, K) # The support for the", "* **Merge**: merge these 2K indices with currently selected indices in ``I`` to", "the residual I_2k = crs.largest_indices(h, K2 if iterations else K3) # We can", "%% # Pick the indices of 2K atoms with largest matches with the", "algorithm successively improves the estimate ``x`` so that the energy of the residual", "+= 1 # %% # Second iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") # %%", "the least squares solution of ``Phi[:, I_sub] z = y`` * **Prune**: Pick", "# %% # Check which indices from ``omega`` are there in ``I_sub``. 
print(jnp.intersect1d(omega,", "atoms indexed by I_sub Phi_sub = Phi[:, flags] # %% # Compute the", "indices: \", missing) # %% # It is time to compute the residual", "of 2K atoms with largest matches with the residual I_2k = crs.largest_indices(h, K2", "plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # We can", "crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0) # %% # ``omega``", "x_I # %% # Compute the residual and verify that it is still", "the flags array flags = flags.at[:].set(False) flags = flags.at[I].set(True) print(jnp.where(flags)) # %% #", "We can check if these include the atoms missed out in first iteration.", "by I_sub Phi_sub = Phi[:, flags] # %% # Compute the least squares", "with the current residual ``r``. * **Identify**: Select the indices of 2K atoms", "Basis # '''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key, M,N) print(Phi.shape) # %% # Coherence of", "the indices of K largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K)", "from cr.sparse.pursuit import cosamp # %% # Run the solver solution = cosamp.matrix_solve_jit(Phi,", "the current residual ``r``. * **Identify**: Select the indices of 2K atoms from", "plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # The algorithm", "in ``I_sub``. print(jnp.intersect1d(omega, I_sub)) # %% # Select the subdictionary of ``Phi`` consisting", "the largest correlation with the residual. * **Merge**: merge these 2K indices with", "from the LS solution x_I = x_sub[Ia] # %% # Here is our", "# Import the module from cr.sparse.pursuit import cosamp # %% # Run the", "# Run the solver solution = cosamp.matrix_solve_jit(Phi, y, K) # The support for", "estimate ``x`` so that the energy of the residual ``r`` reduces. 
The algorithm", "crs import cr.sparse.dict as crdict import cr.sparse.data as crdata # %% # Problem", "I = I_sub[Ia] print(I) # %% # Check if the final K indices", "(res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\") # %% # First iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\")", "- Phi x`` to equal the measurements ``y`` r = y # %%", "the residual. * **Merge**: merge these 2K indices with currently selected indices in", "improves the estimate ``x`` so that the energy of the residual ``r`` reduces.", "We can check if we found all the actual atoms print(\"Found in I_sub:", "flags array flags = flags.at[I_sub].set(True) # Sort the ``I_sub`` array with the help", "and verify that it is still larger than the allowed tolerance r_norm_sqr =", "We can stop iterating now. iterations += 1 # %% # CR-Sparse official", "# The non-zero values on the support x_I = solution.x_I print(x_I) # %%", "the solution # It is time to compute the residual after the second", "the help of flags array I_sub, = jnp.where(flags) # Since no atoms have", "K indices in ``I`` include all the indices in ``omega`` jnp.setdiff1d(omega, I) #", "import random import jax.numpy as jnp # Some keys for generating random numbers", "after the second iteration Phi_I = Phi[:, I] r = y - Phi_I", "the algorithm in action. \"\"\" # %% # Let's import necessary libraries import", "float(y.T @ y) r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\") # %% # A boolean array", "dictionary ``Phi`` which has been used for data measurements. * A measurement vector", "that it is now below the tolerance threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") # %%", "# %% # CR-Sparse official implementation # ---------------------------------------- # The JIT compiled version", "'--', label=\"Estimated solution\") plt.legend() # %% # The algorithm has no direct way", "the estimates # with actual ``x0`` and ``omega``. 
However, note that in the", "crdict.gaussian_mtx(key, M,N) print(Phi.shape) # %% # Coherence of atoms in the sensing matrix", "# Sort the ``I_sub`` array with the help of flags array I_sub, =", "the residual energy and the number of iterations when the algorithm converged. print(solution.r_norm_sqr,", "indeed found the solution # It is time to compute the residual after", "of atoms in the sensing matrix print(crdict.coherence(Phi)) # %% # A sparse model", "residual with the atoms in ``Phi`` h = Phi.T @ r # %%", "Phi[:, I] r = y - Phi_I @ x_I # %% # Compute", "subdictionary x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y) # Pick the indices of", "squares solution of ``Phi[:, I_sub] z = y`` * **Prune**: Pick the largest", "residual energy is above a threshold: * **Match**: Compute the inner product of", "still larger than the allowed tolerance r_norm_sqr = float(r.T @ r) print(f\"{r_norm_sqr=:.2e} >", "print(\"Second iteration:\") # %% # Match the current residual with the atoms in", "for generating random numbers key = random.PRNGKey(0) keys = random.split(key, 4) # For", "check how good we were in picking the correct indices from the actual", "currently selected indices in ``I`` to form ``I_sub``. * **LS**: Compute the least", "the residual after the second iteration Phi_I = Phi[:, I] r = y", "- Phi_I x_I``. It is time to see the algorithm in action. \"\"\"", "algorithm is available in # ``cr.sparse.pursuit.cosamp`` module. 
# %% # Import the module", "2*K # %% # At any time, up to 3K atoms may be", "# %% # Print the residual energy and the number of iterations when", "to estimate a K-sparse solution ``x`` such that ``y`` is approximately equal to", "= Phi.T @ r # %% # Pick the indices of 2K atoms", "by ``I_sub`` Phi_sub = Phi[:, flags] # %% # Compute the least squares", "``Ia`` to the actual indices of atoms in ``Phi`` I = I_sub[Ia] print(f\"{I=}\")", "to equal the measurements ``y`` r = y # %% # Squared norm/energy", "to see the algorithm in action. \"\"\" # %% # Let's import necessary", "now. iterations += 1 # %% # CR-Sparse official implementation # ---------------------------------------- #", "algorithm proceeds as follows: * Initialize the solution ``x`` with zero. * Maintain", "the solver solution = cosamp.matrix_solve_jit(Phi, y, K) # The support for the sparse", "entries from this least square solution and keep them in ``I``. * **Update", "plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend()", "flags = jnp.zeros(N, dtype=bool) # %% # During the matching steps, 2K atoms", "Phi[:, flags] # %% # Compute the least squares solution of ``y`` over", "flags] # %% # Compute the least squares solution of ``y`` over this", "data measurements. * A measurement vector ``y``. * The sparsity level ``K``. The", "largest matches with the residual I_2k = crs.largest_indices(h, K2 if iterations else K3)", "solution ``x`` such that ``y`` is approximately equal to ``Phi x``. A key", "indices with currently selected indices in ``I`` to form ``I_sub``. 
* **LS**: Compute", "While the residual energy is above a threshold: * **Match**: Compute the inner", "K2 if iterations else K3) # We can check if these include the", "crs.largest_indices(h, K2 if iterations else K3) # We can check if these include", "cosamp # %% # Run the solver solution = cosamp.matrix_solve_jit(Phi, y, K) #", "Ia = crs.largest_indices(x_sub, K) print(f\"{Ia=}\") # %% # We need to map the", "of measurements M = 128 # Ambient dimension N = 256 # Sparsity", "x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x, '--',", "# '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") # %% # Match the current residual with the", "iteration Phi_I = Phi[:, I] r = y - Phi_I @ x_I #", "signal found = jnp.intersect1d(omega, I) print(\"Found indices: \", found) # %% # We", "= crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0) # %% #", "of knowing that it indeed found the solution # It is time to", "with the atoms in ``Phi`` h = Phi.T @ r # %% #", "atoms indexed by ``I_sub`` Phi_sub = Phi[:, flags] # %% # Compute the", "actual atoms print(\"Found in I_sub: \", jnp.intersect1d(omega, I_sub)) # %% # Indeed we", "is now below the tolerance threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") # %% # We", "``r``. * **Identify**: Select the indices of 2K atoms from ``Phi`` with the", "far iterations = 0 # %% # A limit on the maximum tolerance", "to ``Phi x``. A key quantity in the algorithm is the residual ``r", "100, facecolor='w', edgecolor='k') plt.plot(x0) # %% # ``omega`` contains the set of indices", "# %% # ``omega`` contains the set of indices at which x is", "The CoSaMP algorithm has following inputs: * A sensing matrix or dictionary ``Phi``", "residual ``r``. 
* **Identify**: Select the indices of 2K atoms from ``Phi`` with", "walk through the steps of CoSaMP algorithm. # Since we have access to", "matches with the residual I_sub = crs.largest_indices(h, K3) # Update the flags array", "y_norm_sqr print(f\"{r_norm_sqr=}\") # %% # A boolean array to track the indices selected", "Second iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") # %% # Match the current residual", "At any time, up to 3K atoms may be selected (after the merge", "missed out in first iteration. print(jnp.intersect1d(omega, I_2k)) # %% # Merge (union) the", "indices in the support. Here are the remaining. missing = jnp.setdiff1d(omega, I) print(\"Missing", "r # %% # Pick the indices of 3K atoms with largest matches", "time, up to 3K atoms may be selected (after the merge step). K3", "r = y # %% # Squared norm/energy of the residual y_norm_sqr =", "# %% # The non-zero values on the support x_I = solution.x_I print(x_I)", "* Initialize the solution ``x`` with zero. * Maintain an index set ``I``", "model # vector is there. # # Initialization # '''''''''''''''''''''''''''''''''''''''''''' # %% #", "``y`` over this subdictionary x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y) # Pick", "I) # %% # Select the corresponding values from the LS solution x_I", "plt.plot(y) # %% # Development of CoSaMP algorithm # --------------------------------- # In the", "merge step). K3 = K + K2 # %% # Number of iterations", "plotting diagrams import matplotlib.pyplot as plt # CR-Sparse modules import cr.sparse as crs", "a threshold: * **Match**: Compute the inner product of each atom in ``Phi``", "# %% # Select the corresponding values from the LS solution x_I =", "can check if these include the atoms missed out in first iteration. 
print(jnp.intersect1d(omega,", "is above a threshold: * **Match**: Compute the inner product of each atom", "in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(Ia) # %% # We need", "# Ambient dimension N = 256 # Sparsity level K = 8 #", "vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # We can check how", "Initialize the solution ``x`` with zero. * Maintain an index set ``I`` (initially", "atoms may be selected (after the merge step). K3 = K + K2", "solution\") plt.legend() # %% # The algorithm has no direct way of knowing", "indices in ``I`` to form ``I_sub``. * **LS**: Compute the least squares solution", "solution.I print(I) # %% # The non-zero values on the support x_I =", "= y - Phi x`` to equal the measurements ``y`` r = y", "of iterations when the algorithm converged. print(solution.r_norm_sqr, solution.iterations) # %% # Let's plot", "the atoms missed out in first iteration. print(jnp.intersect1d(omega, I_2k)) # %% # Merge", "pick 3K atoms in first iteration. print(f\"{I_sub=}\") # %% # Check which indices", "x0, omega = crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0) #", "I) print(\"Missing indices: \", missing) # %% # It is time to compute", "@ r # %% # Pick the indices of 2K atoms with largest", "2K indices flags = flags.at[I_2k].set(True) I_sub, = jnp.where(flags) print(f\"{I_sub=}\") # %% # We", "time to compute the residual after the first iteration Phi_I = Phi[:, I]", "that it is still larger than the allowed tolerance r_norm_sqr = float(r.T @", "iteration of the algorithm successively improves the estimate ``x`` so that the energy", "# In the following, we walk through the steps of CoSaMP algorithm. 
#", "# %% # A limit on the maximum tolerance for residual norm res_norm_rtol", "flags.at[I_2k].set(True) I_sub, = jnp.where(flags) print(f\"{I_sub=}\") # %% # We can check if we", "The objective of the algorithm is to estimate a K-sparse solution ``x`` such", "= crdict.gaussian_mtx(key, M,N) print(Phi.shape) # %% # Coherence of atoms in the sensing", "iterations completed so far iterations = 0 # %% # A limit on", "keys = random.split(key, 4) # For plotting diagrams import matplotlib.pyplot as plt #", "iterations += 1 # %% # CR-Sparse official implementation # ---------------------------------------- # The", "y_norm_sqr * (res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\") # %% # First iteration # ''''''''''''''''''''''''''''''''''''''''''''", "actual indices of atoms in ``Phi`` I = I_sub[Ia] print(f\"{I=}\") # %% #", "Initialization # '''''''''''''''''''''''''''''''''''''''''''' # %% # We assume the initial solution to be", "# During the matching steps, 2K atoms will be picked. K2 = 2*K", "least squares solution of ``y`` over this subdictionary x_sub, r_sub_norms, rank_sub, s_sub =", "been used for data measurements. * A measurement vector ``y``. * The sparsity", "import cr.sparse as crs import cr.sparse.dict as crdict import cr.sparse.data as crdata #", "of the solution. * While the residual energy is above a threshold: *", "flags array flags = flags.at[:].set(False) flags = flags.at[I].set(True) print(jnp.where(flags)) # %% # Mark", "action. \"\"\" # %% # Let's import necessary libraries import jax from jax", "indices selected for least squares steps flags = jnp.zeros(N, dtype=bool) # %% #", "steps of CoSaMP algorithm. # Since we have access to ``x0`` and ``omega``,", "print(\"Found in I_sub: \", jnp.intersect1d(omega, I_sub)) # %% # Indeed we did. The", "in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(Ia) # %% # We need to", "with the residual. 
* **Merge**: merge these 2K indices with currently selected indices", "Phi_sub = Phi[:, flags] # %% # Compute the least squares solution of", "print(f\"{r_norm_sqr=}\") # %% # A boolean array to track the indices selected for", "in ``Phi`` I = I_sub[Ia] print(I) # %% # Check if the final", "iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") # %% # Match the current residual with", "our updated estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w',", "the indices of 2K atoms with largest matches with the residual I_2k =", "# %% # First iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") # %% # Match", "Squared norm/energy of the residual y_norm_sqr = float(y.T @ y) r_norm_sqr = y_norm_sqr", "of the residual y_norm_sqr = float(y.T @ y) r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\") #", "@ x0 plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(y) # %% # Development of", "the subdictionary of ``Phi`` consisting of atoms indexed by ``I_sub`` Phi_sub = Phi[:,", "a K-sparse solution ``x`` such that ``y`` is approximately equal to ``Phi x``.", "(union) the set of previous K indices with the new 2K indices flags", "from jax import random import jax.numpy as jnp # Some keys for generating", "else K3) # We can check if these include the atoms missed out", "s_sub = jnp.linalg.lstsq(Phi_sub, y) # Pick the indices of K largest entries in", "steps, 2K atoms will be picked. 
K2 = 2*K # %% # At", "the ``I_sub`` array with the help of flags array I_sub, = jnp.where(flags) #", "jnp.setdiff1d(omega, I) print(\"Missing indices: \", missing) # %% # It is time to", "# %% # The Sparsifying Basis # '''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key, M,N) print(Phi.shape)", "print(f\"{I=}\") # %% # Select the corresponding values from the LS solution x_I", "# %% # Second iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") # %% # Match", "selected for least squares steps flags = jnp.zeros(N, dtype=bool) # %% # During", "I_sub)) # %% # Select the subdictionary of ``Phi`` consisting of atoms indexed", "is to estimate a K-sparse solution ``x`` such that ``y`` is approximately equal", "# Since we have access to ``x0`` and ``omega``, we can measure the", "Let's import necessary libraries import jax from jax import random import jax.numpy as", "K largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(Ia) # %%", "turns out that it is now below the tolerance threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\")", "@ x_I # %% # Compute the residual and verify that it is", "print(Phi.shape) # %% # Coherence of atoms in the sensing matrix print(crdict.coherence(Phi)) #", "energy is above a threshold: * **Match**: Compute the inner product of each", "y - Phi x``. Each iteration of the algorithm successively improves the estimate", "x_I # %% # Compute the residual and verify that it is now", "by comparing the estimates # with actual ``x0`` and ``omega``. However, note that", "after the first iteration Phi_I = Phi[:, I] r = y - Phi_I", "dtype=bool) # %% # During the matching steps, 2K atoms will be picked.", "out in first iteration. print(jnp.intersect1d(omega, I_2k)) # %% # Merge (union) the set", "nonzero (support of ``x``) print(omega) # %% # Compressive measurements # '''''''''''''''''''''''''' y", "can measure the # progress made by the algorithm steps by comparing the", "algorithm. 
# Since we have access to ``x0`` and ``omega``, we can measure", "of 2K atoms from ``Phi`` with the largest correlation with the residual. *", "@ r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") # %% # Store the selected K indices", "need to map the indices in ``Ia`` to the actual indices of atoms", "these include the atoms missed out in first iteration. print(jnp.intersect1d(omega, I_2k)) # %%", "Indeed we did. The set difference is empty. print(\"Missing in I_sub: \", jnp.setdiff1d(omega,", "the set of indices at which x is nonzero (support of ``x``) print(omega)", "# and pick 3K atoms in first iteration. print(f\"{I_sub=}\") # %% # Check", "completed so far iterations = 0 # %% # A limit on the", "than the allowed tolerance r_norm_sqr = float(r.T @ r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") #", "support print(jnp.setdiff1d(omega, I)) # %% # Print the residual energy and the number", "# Here is our updated estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6),", "``omega`` contains the set of indices at which x is nonzero (support of", "energy of the residual ``r`` reduces. The algorithm proceeds as follows: * Initialize", "measurement vector ``y``. * The sparsity level ``K``. The objective of the algorithm", "CoSaMP algorithm. 
# Since we have access to ``x0`` and ``omega``, we can", "= x_sub[Ia] # %% # Here is our updated estimate of the solution", "actual indices of atoms in ``Phi`` I = I_sub[Ia] print(I) # %% #", "the least squares solution of ``y`` over this subdictionary x_sub, r_sub_norms, rank_sub, s_sub", "r_norm_sqr = float(r.T @ r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") # %% # Store the", "CR-Sparse modules import cr.sparse as crs import cr.sparse.dict as crdict import cr.sparse.data as", "'--', label=\"Estimated solution\") plt.legend() # %% # We can check how good we", "print(omega) # %% # Compressive measurements # '''''''''''''''''''''''''' y = Phi @ x0", "x_sub[Ia] # %% # We now have our first estimate of the solution", "Phi @ x0 plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(y) # %% # Development", "y - Phi_I @ x_I # %% # Compute the residual and verify", "largest correlation with the residual. * **Merge**: merge these 2K indices with currently", "iteration iterations += 1 # %% # Second iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\")", "M,N) print(Phi.shape) # %% # Coherence of atoms in the sensing matrix print(crdict.coherence(Phi))", "# %% # Here is our updated estimate of the solution x =", "3K atoms may be selected (after the merge step). K3 = K +", "Phi x`` to equal the measurements ``y`` r = y # %% #", "\", jnp.setdiff1d(omega, I_sub)) # %% # Select the subdictionary of ``Phi`` consisting of", "# It is time to compute the residual after the second iteration Phi_I", "of this algorithm is available in # ``cr.sparse.pursuit.cosamp`` module. # %% # Import", "up to 3K atoms may be selected (after the merge step). 
K3 =", "A key quantity in the algorithm is the residual ``r = y -", "\"\"\" # %% # Let's import necessary libraries import jax from jax import", "that it is now below the allowed tolerance r_norm_sqr = float(r.T @ r)", "2) print(f\"{max_r_norm_sqr=:.2e}\") # %% # First iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") # %%", "in ``I``. * **Update residual**: Compute ``r = y - Phi_I x_I``. It", "the subdictionary of ``Phi`` consisting of atoms indexed by I_sub Phi_sub = Phi[:,", "successively improves the estimate ``x`` so that the energy of the residual ``r``", "all the indices in ``omega`` jnp.setdiff1d(omega, I) # %% # Select the corresponding", "------------------ # Number of measurements M = 128 # Ambient dimension N =", "entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(f\"{Ia=}\") # %% # We", "\", jnp.intersect1d(omega, I_sub)) # %% # Indeed we did. The set difference is", "= flags.at[:].set(False) flags = flags.at[I].set(True) print(jnp.where(flags)) # %% # Mark the completion of", "selected so far, we can be more aggressive # and pick 3K atoms", "Phi_I @ x_I # %% # Compute the residual and verify that it", "``omega``, we can measure the # progress made by the algorithm steps by", "residual ``r = y - Phi x``. Each iteration of the algorithm successively", "Phi.T @ r # %% # Pick the indices of 2K atoms with", "``I_sub`` array with the help of flags array I_sub, = jnp.where(flags) # Since", "is time to see the algorithm in action. \"\"\" # %% # Let's", "# %% # Check if the final K indices in ``I`` include all", "2K indices with currently selected indices in ``I`` to form ``I_sub``. 
* **LS**:", "estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0,", "the module from cr.sparse.pursuit import cosamp # %% # Run the solver solution", "It turns out that it is now below the tolerance threshold print(f\"{r_norm_sqr=:.2e} <", "entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(Ia) # %% # We", "and verify that it is now below the allowed tolerance r_norm_sqr = float(r.T", "numbers key = random.PRNGKey(0) keys = random.split(key, 4) # For plotting diagrams import", "The Sparsifying Basis # '''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key, M,N) print(Phi.shape) # %% #", "matches with the residual I_2k = crs.largest_indices(h, K2 if iterations else K3) #", "= crs.largest_indices(x_sub, K) print(f\"{Ia=}\") # %% # We need to map the indices", "in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(f\"{Ia=}\") # %% # We need", "``I`` to form ``I_sub``. * **LS**: Compute the least squares solution of ``Phi[:,", "the actual indices of atoms in ``Phi`` I = I_sub[Ia] print(f\"{I=}\") # %%", "inputs: * A sensing matrix or dictionary ``Phi`` which has been used for", "%% # Let's import necessary libraries import jax from jax import random import", "step). 
K3 = K + K2 # %% # Number of iterations completed", "# '''''''''''''''''''''''''' x0, omega = crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')", "``Phi`` I = I_sub[Ia] print(I) # %% # Check if the final K", "that it indeed found the solution # It is time to compute the", "# %% # Squared norm/energy of the residual y_norm_sqr = float(y.T @ y)", "residual and verify that it is now below the allowed tolerance r_norm_sqr =", "the residual I_sub = crs.largest_indices(h, K3) # Update the flags array flags =", "missing = jnp.setdiff1d(omega, I) print(\"Missing indices: \", missing) # %% # It is", "step ========================== This example explains the step by step development of CoSaMP (Compressive", "(initially empty) of atoms selected as part of the solution. * While the", "atoms with largest matches with the residual I_2k = crs.largest_indices(h, K2 if iterations", "facecolor='w', edgecolor='k') plt.plot(x0, label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% #", "%% # CR-Sparse official implementation # ---------------------------------------- # The JIT compiled version of", "estimate a K-sparse solution ``x`` such that ``y`` is approximately equal to ``Phi", "actual ``x0`` and ``omega``. 
However, note that in the # real implementation of", "y = Phi @ x0 plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(y) # %%", "# Select the subdictionary of ``Phi`` consisting of atoms indexed by I_sub Phi_sub", "below the tolerance threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") # %% # We have completed", "so far, we can be more aggressive # and pick 3K atoms in", "selected K indices in the flags array flags = flags.at[:].set(False) flags = flags.at[I].set(True)", "We assume the initial solution to be zero and # the residual ``r", "(support of ``x``) print(omega) # %% # Compressive measurements # '''''''''''''''''''''''''' y =", "Sort the ``I_sub`` array with the help of flags array I_sub, = jnp.where(flags)", "# At any time, up to 3K atoms may be selected (after the", "A sensing matrix or dictionary ``Phi`` which has been used for data measurements.", "= y - Phi x``. Each iteration of the algorithm successively improves the", "y) r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\") # %% # A boolean array to track", "Matching Pursuit) algorithm for sparse recovery. It then shows how to use the", "= crs.largest_indices(x_sub, K) print(Ia) # %% # We need to map the indices", "label=\"Original vector\") plt.plot(x, '--', label=\"Estimated solution\") plt.legend() # %% # We can check", "of CoSaMP in ``CR-Sparse``. The CoSaMP algorithm has following inputs: * A sensing", "# Let's import necessary libraries import jax from jax import random import jax.numpy", "# %% # Run the solver solution = cosamp.matrix_solve_jit(Phi, y, K) # The", "the matching steps, 2K atoms will be picked. K2 = 2*K # %%", "= float(r.T @ r) # It turns out that it is now below", "square solution and keep them in ``I``. * **Update residual**: Compute ``r =", "``y`` r = y # %% # Squared norm/energy of the residual y_norm_sqr", "- Phi x``. 
Each iteration of the algorithm successively improves the estimate ``x``", "K indices with the new 2K indices flags = flags.at[I_2k].set(True) I_sub, = jnp.where(flags)", "implementation of the algorithm, no access to original model # vector is there.", "estimates # with actual ``x0`` and ``omega``. However, note that in the #", "%% # We can check how good we were in picking the correct", "atoms in the sensing matrix print(crdict.coherence(Phi)) # %% # A sparse model vector", "will be picked. K2 = 2*K # %% # At any time, up", "indices in ``omega`` jnp.setdiff1d(omega, I) # %% # Select the corresponding values from", "stop iterating now. iterations += 1 # %% # CR-Sparse official implementation #", "matrix or dictionary ``Phi`` which has been used for data measurements. * A", "Number of measurements M = 128 # Ambient dimension N = 256 #", "solution # It is time to compute the residual after the second iteration", "the actual indices of atoms in ``Phi`` I = I_sub[Ia] print(I) # %%", "# Verify that we successfully recovered the support print(jnp.setdiff1d(omega, I)) # %% #", "energy and the number of iterations when the algorithm converged. print(solution.r_norm_sqr, solution.iterations) #", "``CR-Sparse``. The CoSaMP algorithm has following inputs: * A sensing matrix or dictionary", "in ``Phi`` with the current residual ``r``. * **Identify**: Select the indices of", "compiled version of this algorithm is available in # ``cr.sparse.pursuit.cosamp`` module. # %%", "x_sub[Ia] # %% # Here is our updated estimate of the solution x", "converged. print(solution.r_norm_sqr, solution.iterations) # %% # Let's plot the solution x = jnp.zeros(N).at[I].set(x_I)", "matrix print(crdict.coherence(Phi)) # %% # A sparse model vector # '''''''''''''''''''''''''' x0, omega", "I_sub: \", jnp.intersect1d(omega, I_sub)) # %% # Indeed we did. 
The set difference", "Import the module from cr.sparse.pursuit import cosamp # %% # Run the solver", "= 2*K # %% # At any time, up to 3K atoms may", "of atoms in ``Phi`` I = I_sub[Ia] print(I) # %% # Check if", "atoms have been selected so far, we can be more aggressive # and", "include all the indices in ``omega`` jnp.setdiff1d(omega, I) # %% # Select the", "with largest matches with the residual I_sub = crs.largest_indices(h, K3) # Update the", "plt.legend() # %% # The algorithm has no direct way of knowing that", "The algorithm has no direct way of knowing that it indeed found the", "from ``omega`` are there in ``I_sub``. print(jnp.intersect1d(omega, I_sub)) # %% # Select the", "with the largest correlation with the residual. * **Merge**: merge these 2K indices", "I_2k = crs.largest_indices(h, K2 if iterations else K3) # We can check if", "= y # %% # Squared norm/energy of the residual y_norm_sqr = float(y.T", "= crs.largest_indices(h, K3) # Update the flags array flags = flags.at[I_sub].set(True) # Sort", "the measurements ``y`` r = y # %% # Squared norm/energy of the", "good we were in picking the correct indices from the actual support of", "Sparsifying Basis # '''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key, M,N) print(Phi.shape) # %% # Coherence", "module. # %% # Import the module from cr.sparse.pursuit import cosamp # %%", "has following inputs: * A sensing matrix or dictionary ``Phi`` which has been", "matplotlib.pyplot as plt # CR-Sparse modules import cr.sparse as crs import cr.sparse.dict as", "# vector is there. # # Initialization # '''''''''''''''''''''''''''''''''''''''''''' # %% # We", "is now below the allowed tolerance r_norm_sqr = float(r.T @ r) # It", "jnp.linalg.lstsq(Phi_sub, y) # Pick the indices of K largest entries in in ``x_sub``", "``I``. * **Update residual**: Compute ``r = y - Phi_I x_I``. 
It is", "import cr.sparse.data as crdata # %% # Problem Setup # ------------------ # Number", "indices of 3K atoms with largest matches with the residual I_sub = crs.largest_indices(h,", "K largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(f\"{Ia=}\") # %%", "# %% # The algorithm has no direct way of knowing that it", "how to use the official implementation of CoSaMP in ``CR-Sparse``. The CoSaMP algorithm", "= crs.largest_indices(h, K2 if iterations else K3) # We can check if these", "plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0) # %% # ``omega`` contains the set", "**LS**: Compute the least squares solution of ``Phi[:, I_sub] z = y`` *", "r_norm_sqr = y_norm_sqr print(f\"{r_norm_sqr=}\") # %% # A boolean array to track the", "such that ``y`` is approximately equal to ``Phi x``. A key quantity in", "indices of K largest entries in in ``x_sub`` Ia = crs.largest_indices(x_sub, K) print(Ia)", "# %% # Compressive measurements # '''''''''''''''''''''''''' y = Phi @ x0 plt.figure(figsize=(8,6),", "libraries import jax from jax import random import jax.numpy as jnp # Some", "K) print(f\"{Ia=}\") # %% # We need to map the indices in ``Ia``", "We now have our first estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6),", "boolean array to track the indices selected for least squares steps flags =", "zero. * Maintain an index set ``I`` (initially empty) of atoms selected as", "Check which indices from ``omega`` are there in ``I_sub``. print(jnp.intersect1d(omega, I_sub)) # %%", "Pick the indices of 3K atoms with largest matches with the residual I_sub", "y`` * **Prune**: Pick the largest K entries from this least square solution", "from ``Phi`` with the largest correlation with the residual. * **Merge**: merge these", "model vector # '''''''''''''''''''''''''' x0, omega = crdata.sparse_normal_representations(key, N, K) plt.figure(figsize=(8,6), dpi= 100,", "remaining. 
missing = jnp.setdiff1d(omega, I) print(\"Missing indices: \", missing) # %% # It", "when the algorithm converged. print(solution.r_norm_sqr, solution.iterations) # %% # Let's plot the solution", "steps by comparing the estimates # with actual ``x0`` and ``omega``. However, note", "compute the residual after the second iteration Phi_I = Phi[:, I] r =", "However, note that in the # real implementation of the algorithm, no access", "atoms in first iteration. print(f\"{I_sub=}\") # %% # Check which indices from ``omega``", "'''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") # %% # Match the current residual with the atoms", "for least squares steps flags = jnp.zeros(N, dtype=bool) # %% # During the", "of indices at which x is nonzero (support of ``x``) print(omega) # %%", "# Number of iterations completed so far iterations = 0 # %% #", "import matplotlib.pyplot as plt # CR-Sparse modules import cr.sparse as crs import cr.sparse.dict", "solution x_I = x_sub[Ia] # %% # We now have our first estimate", "x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y) # Pick the indices of K", "x_I = x_sub[Ia] # %% # We now have our first estimate of", "have our first estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100,", "crdata # %% # Problem Setup # ------------------ # Number of measurements M", "solution.iterations) # %% # Let's plot the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi=", "the indices selected for least squares steps flags = jnp.zeros(N, dtype=bool) # %%", "may be selected (after the merge step). 
K3 = K + K2 #", "necessary libraries import jax from jax import random import jax.numpy as jnp #", "tolerance threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") # %% # We have completed the signal", "atoms with largest matches with the residual I_sub = crs.largest_indices(h, K3) # Update", "# Since no atoms have been selected so far, we can be more", "%% # A limit on the maximum tolerance for residual norm res_norm_rtol =", "# Merge (union) the set of previous K indices with the new 2K", "atoms in ``Phi`` I = I_sub[Ia] print(I) # %% # Check if the", "measurements # '''''''''''''''''''''''''' y = Phi @ x0 plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')", "quantity in the algorithm is the residual ``r = y - Phi x``.", "from the LS solution x_I = x_sub[Ia] # %% # We now have", "print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") # %% # We have completed the signal recovery. We", "CoSaMP in ``CR-Sparse``. The CoSaMP algorithm has following inputs: * A sensing matrix", "= y - Phi_I x_I``. It is time to see the algorithm in", "Check if the final K indices in ``I`` include all the indices in", "= random.split(key, 4) # For plotting diagrams import matplotlib.pyplot as plt # CR-Sparse", "# We can check if these include the atoms missed out in first", "# %% # It is time to compute the residual after the first", "# The JIT compiled version of this algorithm is available in # ``cr.sparse.pursuit.cosamp``", "indexed by I_sub Phi_sub = Phi[:, flags] # %% # Compute the least", "0 # %% # A limit on the maximum tolerance for residual norm", "the algorithm, no access to original model # vector is there. # #", "found 6 out of 8 indices in the support. Here are the remaining.", "the iteration iterations += 1 # %% # Second iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second", "from this least square solution and keep them in ``I``. 
* **Update residual**:", "%% # Squared norm/energy of the residual y_norm_sqr = float(y.T @ y) r_norm_sqr", "# %% # Verify that we successfully recovered the support print(jnp.setdiff1d(omega, I)) #", "flags = flags.at[:].set(False) flags = flags.at[I].set(True) print(jnp.where(flags)) # %% # Mark the completion", "residual after the second iteration Phi_I = Phi[:, I] r = y -", "we can measure the # progress made by the algorithm steps by comparing", "atoms from ``Phi`` with the largest correlation with the residual. * **Merge**: merge", "= flags.at[I_sub].set(True) # Sort the ``I_sub`` array with the help of flags array", "y, K) # The support for the sparse solution I = solution.I print(I)", "I)) # %% # Print the residual energy and the number of iterations", "residual I_sub = crs.largest_indices(h, K3) # Update the flags array flags = flags.at[I_sub].set(True)", "solution ``x`` with zero. * Maintain an index set ``I`` (initially empty) of", "%% # Mark the completion of the iteration iterations += 1 # %%", "indices with the new 2K indices flags = flags.at[I_2k].set(True) I_sub, = jnp.where(flags) print(f\"{I_sub=}\")", "iterations else K3) # We can check if these include the atoms missed", "3K atoms in first iteration. print(f\"{I_sub=}\") # %% # Check which indices from", "# %% # Pick the indices of 3K atoms with largest matches with", "with zero. * Maintain an index set ``I`` (initially empty) of atoms selected", "the indices of 3K atoms with largest matches with the residual I_sub =", "so far iterations = 0 # %% # A limit on the maximum", "the sensing matrix print(crdict.coherence(Phi)) # %% # A sparse model vector # ''''''''''''''''''''''''''", "the residual ``r = y - Phi x``. Each iteration of the algorithm", "that the energy of the residual ``r`` reduces. 
The algorithm proceeds as follows:", "by step development of CoSaMP (Compressive Sensing Matching Pursuit) algorithm for sparse recovery.", "verify that it is now below the allowed tolerance r_norm_sqr = float(r.T @", "the remaining. missing = jnp.setdiff1d(omega, I) print(\"Missing indices: \", missing) # %% #", "# Compressive measurements # '''''''''''''''''''''''''' y = Phi @ x0 plt.figure(figsize=(8,6), dpi= 100,", "# %% # Let's import necessary libraries import jax from jax import random", "in I_sub: \", jnp.intersect1d(omega, I_sub)) # %% # Indeed we did. The set", "r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y) # Pick the indices of K largest", "6 out of 8 indices in the support. Here are the remaining. missing", "successfully recovered the support print(jnp.setdiff1d(omega, I)) # %% # Print the residual energy", "sensing matrix or dictionary ``Phi`` which has been used for data measurements. *", "sensing matrix print(crdict.coherence(Phi)) # %% # A sparse model vector # '''''''''''''''''''''''''' x0,", "I_sub, = jnp.where(flags) # Since no atoms have been selected so far, we", "# First iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") # %% # Match the current", "# A sparse model vector # '''''''''''''''''''''''''' x0, omega = crdata.sparse_normal_representations(key, N, K)", "assume the initial solution to be zero and # the residual ``r =", "as jnp # Some keys for generating random numbers key = random.PRNGKey(0) keys", "found all the actual atoms print(\"Found in I_sub: \", jnp.intersect1d(omega, I_sub)) # %%", "# %% # During the matching steps, 2K atoms will be picked. 
K2", "---------------------------------------- # The JIT compiled version of this algorithm is available in #", "%% # The non-zero values on the support x_I = solution.x_I print(x_I) #", "limit on the maximum tolerance for residual norm res_norm_rtol = 1e-3 max_r_norm_sqr =", "Compressive measurements # '''''''''''''''''''''''''' y = Phi @ x0 plt.figure(figsize=(8,6), dpi= 100, facecolor='w',", "The JIT compiled version of this algorithm is available in # ``cr.sparse.pursuit.cosamp`` module.", "the residual ``r = y - Phi x`` to equal the measurements ``y``", "that ``y`` is approximately equal to ``Phi x``. A key quantity in the", "in first iteration. print(jnp.intersect1d(omega, I_2k)) # %% # Merge (union) the set of", "consisting of atoms indexed by I_sub Phi_sub = Phi[:, flags] # %% #", "%% # Select the subdictionary of ``Phi`` consisting of atoms indexed by ``I_sub``", "algorithm # --------------------------------- # In the following, we walk through the steps of", "# '''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key, M,N) print(Phi.shape) # %% # Coherence of atoms", "residual ``r = y - Phi x`` to equal the measurements ``y`` r", "the energy of the residual ``r`` reduces. The algorithm proceeds as follows: *", "set ``I`` (initially empty) of atoms selected as part of the solution. *", "print(jnp.intersect1d(omega, I_sub)) # %% # Select the subdictionary of ``Phi`` consisting of atoms", "%% # We now have our first estimate of the solution x =", "print(f\"{max_r_norm_sqr=:.2e}\") # %% # First iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") # %% #", "of atoms selected as part of the solution. * While the residual energy", "# %% # We can check if we found all the actual atoms", "this least square solution and keep them in ``I``. * **Update residual**: Compute", "them in ``I``. 
* **Update residual**: Compute ``r = y - Phi_I x_I``.", "I_sub, = jnp.where(flags) print(f\"{I_sub=}\") # %% # We can check if we found", "K-sparse solution ``x`` such that ``y`` is approximately equal to ``Phi x``. A", "%% # Pick the indices of 3K atoms with largest matches with the", "# real implementation of the algorithm, no access to original model # vector", "level K = 8 # %% # The Sparsifying Basis # '''''''''''''''''''''''''' Phi", "h = Phi.T @ r # %% # Pick the indices of 2K", "2K atoms from ``Phi`` with the largest correlation with the residual. * **Merge**:", "= jnp.setdiff1d(omega, I) print(\"Missing indices: \", missing) # %% # It is time", "Compute the least squares solution of ``Phi[:, I_sub] z = y`` * **Prune**:", "current residual ``r``. * **Identify**: Select the indices of 2K atoms from ``Phi``", "Compute the inner product of each atom in ``Phi`` with the current residual", "= jnp.where(flags) # Since no atoms have been selected so far, we can", "1 # %% # Second iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") # %% #", "the solution. * While the residual energy is above a threshold: * **Match**:", "algorithm has no direct way of knowing that it indeed found the solution", "values on the support x_I = solution.x_I print(x_I) # %% # Verify that", "vector ``y``. * The sparsity level ``K``. The objective of the algorithm is", "K entries from this least square solution and keep them in ``I``. *", "# The Sparsifying Basis # '''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key, M,N) print(Phi.shape) # %%", "this subdictionary x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y) # Pick the indices", "Ia = crs.largest_indices(x_sub, K) print(Ia) # %% # We need to map the", "** 2) print(f\"{max_r_norm_sqr=:.2e}\") # %% # First iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"First iteration:\") #", "empty) of atoms selected as part of the solution. 
* While the residual", "of the residual ``r`` reduces. The algorithm proceeds as follows: * Initialize the", "jax.numpy as jnp # Some keys for generating random numbers key = random.PRNGKey(0)", "100, facecolor='w', edgecolor='k') plt.plot(y) # %% # Development of CoSaMP algorithm # ---------------------------------", "iterations += 1 # %% # Second iteration # '''''''''''''''''''''''''''''''''''''''''''' print(\"Second iteration:\") #", "threshold print(f\"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}\") # %% # We have completed the signal recovery.", "cosamp.matrix_solve_jit(Phi, y, K) # The support for the sparse solution I = solution.I", "> {max_r_norm_sqr=:.2e}\") # %% # Store the selected K indices in the flags", "recovery. It then shows how to use the official implementation of CoSaMP in", "r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") # %% # Store the selected K indices in", "= 128 # Ambient dimension N = 256 # Sparsity level K =", "* **Update residual**: Compute ``r = y - Phi_I x_I``. It is time", "new 2K indices flags = flags.at[I_2k].set(True) I_sub, = jnp.where(flags) print(f\"{I_sub=}\") # %% #", "%% # Select the subdictionary of ``Phi`` consisting of atoms indexed by I_sub", "CoSaMP step by step ========================== This example explains the step by step development", "to use the official implementation of CoSaMP in ``CR-Sparse``. The CoSaMP algorithm has", "each atom in ``Phi`` with the current residual ``r``. * **Identify**: Select the", "measurements M = 128 # Ambient dimension N = 256 # Sparsity level", "\", missing) # %% # It is time to compute the residual after", "# ``cr.sparse.pursuit.cosamp`` module. # %% # Import the module from cr.sparse.pursuit import cosamp", "set difference is empty. print(\"Missing in I_sub: \", jnp.setdiff1d(omega, I_sub)) # %% #", "step by step ========================== This example explains the step by step development of", "this algorithm is available in # ``cr.sparse.pursuit.cosamp`` module. 
# %% # Import the", "# Store the selected K indices in the flags array flags = flags.at[:].set(False)", "out of 8 indices in the support. Here are the remaining. missing =", "dpi= 100, facecolor='w', edgecolor='k') plt.plot(y) # %% # Development of CoSaMP algorithm #", "%% # It is time to compute the residual after the first iteration", "# Compute the residual and verify that it is still larger than the", "y # %% # Squared norm/energy of the residual y_norm_sqr = float(y.T @", "and ``omega``, we can measure the # progress made by the algorithm steps", "following inputs: * A sensing matrix or dictionary ``Phi`` which has been used", "to map the indices in ``Ia`` to the actual indices of atoms in", "import cosamp # %% # Run the solver solution = cosamp.matrix_solve_jit(Phi, y, K)", "in ``Ia`` to the actual indices of atoms in ``Phi`` I = I_sub[Ia]", "recovered the support print(jnp.setdiff1d(omega, I)) # %% # Print the residual energy and", "%% # The algorithm has no direct way of knowing that it indeed", "keep them in ``I``. * **Update residual**: Compute ``r = y - Phi_I", "the selected K indices in the flags array flags = flags.at[:].set(False) flags =", "K2 = 2*K # %% # At any time, up to 3K atoms", "the allowed tolerance r_norm_sqr = float(r.T @ r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\") # %%", "1e-3 max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\") # %% # First", "as crdict import cr.sparse.data as crdata # %% # Problem Setup # ------------------", "A limit on the maximum tolerance for residual norm res_norm_rtol = 1e-3 max_r_norm_sqr", "< {max_r_norm_sqr=:.2e}\") # %% # We have completed the signal recovery. We can", "# We now have our first estimate of the solution x = jnp.zeros(N).at[I].set(x_I)", "available in # ``cr.sparse.pursuit.cosamp`` module. 
# %% # Import the module from cr.sparse.pursuit", "* **Prune**: Pick the largest K entries from this least square solution and", "dpi= 100, facecolor='w', edgecolor='k') plt.plot(x0) # %% # ``omega`` contains the set of", "\", found) # %% # We found 6 out of 8 indices in", "indices: \", found) # %% # We found 6 out of 8 indices", "%% # We need to map the indices in ``Ia`` to the actual", "It is time to compute the residual after the second iteration Phi_I =", "``I_sub``. * **LS**: Compute the least squares solution of ``Phi[:, I_sub] z =", "to the actual indices of atoms in ``Phi`` I = I_sub[Ia] print(I) #", "**Prune**: Pick the largest K entries from this least square solution and keep", "larger than the allowed tolerance r_norm_sqr = float(r.T @ r) print(f\"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}\")", "# CR-Sparse modules import cr.sparse as crs import cr.sparse.dict as crdict import cr.sparse.data", "``x_sub`` Ia = crs.largest_indices(x_sub, K) print(f\"{Ia=}\") # %% # We need to map", "``I_sub``. print(jnp.intersect1d(omega, I_sub)) # %% # Select the subdictionary of ``Phi`` consisting of", "I_sub Phi_sub = Phi[:, flags] # %% # Compute the least squares solution", "with currently selected indices in ``I`` to form ``I_sub``. * **LS**: Compute the", "flags array I_sub, = jnp.where(flags) # Since no atoms have been selected so", "the residual and verify that it is now below the allowed tolerance r_norm_sqr", "%% # Match the current residual with the atoms in ``Phi`` h =", "atoms in ``Phi`` I = I_sub[Ia] print(f\"{I=}\") # %% # Select the corresponding", "r) # It turns out that it is now below the tolerance threshold", "8 # %% # The Sparsifying Basis # '''''''''''''''''''''''''' Phi = crdict.gaussian_mtx(key, M,N)", "**Identify**: Select the indices of 2K atoms from ``Phi`` with the largest correlation", "= y_norm_sqr * (res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\") # %% # First iteration #", "comparing the estimates # with actual ``x0`` and ``omega``. 
However, note that in", "# Check which indices from ``omega`` are there in ``I_sub``. print(jnp.intersect1d(omega, I_sub)) #", "= 1e-3 max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2) print(f\"{max_r_norm_sqr=:.2e}\") # %% #", "* **LS**: Compute the least squares solution of ``Phi[:, I_sub] z = y``", "'''''''''''''''''''''''''''''''''''''''''''' # %% # We assume the initial solution to be zero and", "follows: * Initialize the solution ``x`` with zero. * Maintain an index set", "I_2k)) # %% # Merge (union) the set of previous K indices with", "Ambient dimension N = 256 # Sparsity level K = 8 # %%", "selected indices in ``I`` to form ``I_sub``. * **LS**: Compute the least squares", "# %% # Select the subdictionary of ``Phi`` consisting of atoms indexed by", "squares solution of ``y`` over this subdictionary x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub,", "Some keys for generating random numbers key = random.PRNGKey(0) keys = random.split(key, 4)", "It then shows how to use the official implementation of CoSaMP in ``CR-Sparse``.", "final K indices in ``I`` include all the indices in ``omega`` jnp.setdiff1d(omega, I)", "``omega``. However, note that in the # real implementation of the algorithm, no", "support x_I = solution.x_I print(x_I) # %% # Verify that we successfully recovered", "sparse recovery. It then shows how to use the official implementation of CoSaMP", "We have completed the signal recovery. We can stop iterating now. iterations +=", "updated estimate of the solution x = jnp.zeros(N).at[I].set(x_I) plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')", "%% # ``omega`` contains the set of indices at which x is nonzero" ]
[ "= [] for dockerfile, violationFile in tqdm(testPairs[:LIMIT]): stats = patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True)", "0 position_dist[p.position] += 1 if p.rule not in rule_dist: rule_dist[p.rule] = 0 rule_dist[p.rule]", "stats = patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats) for s in all_stats: print(s) with", "all_stats)) fixed = sum(map(lambda it: it.fixed, all_stats)) unfixed = sum(map(lambda it: it.unfixed, all_stats))", "[] for dockerfile, violationFile in tqdm(testPairs[:LIMIT]): stats = patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats)", "stat.patches] position_dist = {} rule_dist = {} for p in verified_patches: if p.position", "files should be examined [0,100] LIMIT = None def evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir())", "import datetime from tqdm import tqdm from dfp_main import patch, PatchStats, setVerbose TEST_SET_PATH", "be examined [0,100] LIMIT = None def evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs =", "p in stat.patches] position_dist = {} rule_dist = {} for p in verified_patches:", "test files should be examined [0,100] LIMIT = None def evaluateTestSet(): testFiles =", "open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f: pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda it: it.time, all_stats))", "= {} rule_dist = {} for p in verified_patches: if p.position not in", "if p.position not in position_dist: position_dist[p.position] = 0 position_dist[p.position] += 1 if p.rule", "tqdm from dfp_main import patch, PatchStats, setVerbose TEST_SET_PATH = \"testSet\" # How many", "with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f: pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda it: it.time,", "i in range(0, len(testFiles), 2)] all_stats = [] for 
dockerfile, violationFile in tqdm(testPairs[:LIMIT]):", "position_dist[p.position] = 0 position_dist[p.position] += 1 if p.rule not in rule_dist: rule_dist[p.rule] =", "How many test files should be examined [0,100] LIMIT = None def evaluateTestSet():", "sum(map(lambda it: it.unfixed, all_stats)) verified_patches = [p for stat in all_stats for p", "import tqdm from dfp_main import patch, PatchStats, setVerbose TEST_SET_PATH = \"testSet\" # How", "tqdm(testPairs[:LIMIT]): stats = patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats) for s in all_stats: print(s)", "\"testSet\" # How many test files should be examined [0,100] LIMIT = None", "it.unfixed, all_stats)) verified_patches = [p for stat in all_stats for p in stat.patches]", "range(0, len(testFiles), 2)] all_stats = [] for dockerfile, violationFile in tqdm(testPairs[:LIMIT]): stats =", "sum(map(lambda it: it.total, all_stats)) fixed = sum(map(lambda it: it.fixed, all_stats)) unfixed = sum(map(lambda", "examined [0,100] LIMIT = None def evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i],", "= 0 position_dist[p.position] += 1 if p.rule not in rule_dist: rule_dist[p.rule] = 0", "it.fixed, all_stats)) unfixed = sum(map(lambda it: it.unfixed, all_stats)) verified_patches = [p for stat", "datetime import datetime from tqdm import tqdm from dfp_main import patch, PatchStats, setVerbose", "= patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats) for s in all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\",", "it.time, all_stats)) avg_time = sum(times) / len(times) total = sum(map(lambda it: it.total, all_stats))", "all_stats)) verified_patches = [p for stat in all_stats for p in stat.patches] position_dist", "from pathlib import Path from datetime import datetime from tqdm import tqdm from", "violationFile in tqdm(testPairs[:LIMIT]): stats = 
patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats) for s in", "Path from datetime import datetime from tqdm import tqdm from dfp_main import patch,", "from dfp_main import patch, PatchStats, setVerbose TEST_SET_PATH = \"testSet\" # How many test", "/ len(times) total = sum(map(lambda it: it.total, all_stats)) fixed = sum(map(lambda it: it.fixed,", "sum(map(lambda it: it.fixed, all_stats)) unfixed = sum(map(lambda it: it.unfixed, all_stats)) verified_patches = [p", "p.position not in position_dist: position_dist[p.position] = 0 position_dist[p.position] += 1 if p.rule not", "in stat.patches] position_dist = {} rule_dist = {} for p in verified_patches: if", "len(times) total = sum(map(lambda it: it.total, all_stats)) fixed = sum(map(lambda it: it.fixed, all_stats))", "pickle from pathlib import Path from datetime import datetime from tqdm import tqdm", "{} for p in verified_patches: if p.position not in position_dist: position_dist[p.position] = 0", "in verified_patches: if p.position not in position_dist: position_dist[p.position] = 0 position_dist[p.position] += 1", "in range(0, len(testFiles), 2)] all_stats = [] for dockerfile, violationFile in tqdm(testPairs[:LIMIT]): stats", "dockerfile, violationFile in tqdm(testPairs[:LIMIT]): stats = patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats) for s", "LIMIT = None def evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i], testFiles[i +", "= sum(map(lambda it: it.total, all_stats)) fixed = sum(map(lambda it: it.fixed, all_stats)) unfixed =", "setVerbose TEST_SET_PATH = \"testSet\" # How many test files should be examined [0,100]", "\"wb\") as f: pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda it: it.time, all_stats)) avg_time", "if p.rule not in rule_dist: rule_dist[p.rule] = 0 rule_dist[p.rule] += 1 setVerbose(True) PatchStats(total,", "p.rule not in 
rule_dist: rule_dist[p.rule] = 0 rule_dist[p.rule] += 1 setVerbose(True) PatchStats(total, fixed,", "+= 1 setVerbose(True) PatchStats(total, fixed, unfixed).print() print(f\"Average time: {avg_time}s\") print(f\"Position distribution: {position_dist}\") print(f\"Rule", "len(testFiles), 2)] all_stats = [] for dockerfile, violationFile in tqdm(testPairs[:LIMIT]): stats = patch(str(dockerfile),", "testPairs = [(testFiles[i], testFiles[i + 1]) for i in range(0, len(testFiles), 2)] all_stats", "[0,100] LIMIT = None def evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i], testFiles[i", "= list(map(lambda it: it.time, all_stats)) avg_time = sum(times) / len(times) total = sum(map(lambda", "print(f\"Average time: {avg_time}s\") print(f\"Position distribution: {position_dist}\") print(f\"Rule distribution: {rule_dist}\") if __name__ == \"__main__\":", "it: it.fixed, all_stats)) unfixed = sum(map(lambda it: it.unfixed, all_stats)) verified_patches = [p for", "[p for stat in all_stats for p in stat.patches] position_dist = {} rule_dist", "verified_patches: if p.position not in position_dist: position_dist[p.position] = 0 position_dist[p.position] += 1 if", "= list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i], testFiles[i + 1]) for i in range(0, len(testFiles),", "for p in stat.patches] position_dist = {} rule_dist = {} for p in", "in all_stats for p in stat.patches] position_dist = {} rule_dist = {} for", "position_dist[p.position] += 1 if p.rule not in rule_dist: rule_dist[p.rule] = 0 rule_dist[p.rule] +=", "rule_dist: rule_dist[p.rule] = 0 rule_dist[p.rule] += 1 setVerbose(True) PatchStats(total, fixed, unfixed).print() print(f\"Average time:", "from tqdm import tqdm from dfp_main import patch, PatchStats, setVerbose TEST_SET_PATH = \"testSet\"", "it: it.total, all_stats)) fixed = sum(map(lambda it: it.fixed, all_stats)) unfixed = sum(map(lambda it:", "+= 1 if p.rule not in rule_dist: rule_dist[p.rule] = 0 
rule_dist[p.rule] += 1", "[(testFiles[i], testFiles[i + 1]) for i in range(0, len(testFiles), 2)] all_stats = []", "many test files should be examined [0,100] LIMIT = None def evaluateTestSet(): testFiles", "patch, PatchStats, setVerbose TEST_SET_PATH = \"testSet\" # How many test files should be", "+ 1]) for i in range(0, len(testFiles), 2)] all_stats = [] for dockerfile,", "list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i], testFiles[i + 1]) for i in range(0, len(testFiles), 2)]", "rule_dist[p.rule] += 1 setVerbose(True) PatchStats(total, fixed, unfixed).print() print(f\"Average time: {avg_time}s\") print(f\"Position distribution: {position_dist}\")", "# How many test files should be examined [0,100] LIMIT = None def", "for s in all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f: pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL)", "rule_dist = {} for p in verified_patches: if p.position not in position_dist: position_dist[p.position]", "all_stats)) avg_time = sum(times) / len(times) total = sum(map(lambda it: it.total, all_stats)) fixed", "unfixed = sum(map(lambda it: it.unfixed, all_stats)) verified_patches = [p for stat in all_stats", "TEST_SET_PATH = \"testSet\" # How many test files should be examined [0,100] LIMIT", "evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i], testFiles[i + 1]) for i in", "= sum(map(lambda it: it.fixed, all_stats)) unfixed = sum(map(lambda it: it.unfixed, all_stats)) verified_patches =", "list(map(lambda it: it.time, all_stats)) avg_time = sum(times) / len(times) total = sum(map(lambda it:", "all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f: pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda", "{} rule_dist = {} for p in verified_patches: if p.position not in position_dist:", "PatchStats, setVerbose TEST_SET_PATH = \"testSet\" # 
How many test files should be examined", "testFiles[i + 1]) for i in range(0, len(testFiles), 2)] all_stats = [] for", "= [(testFiles[i], testFiles[i + 1]) for i in range(0, len(testFiles), 2)] all_stats =", "= 0 rule_dist[p.rule] += 1 setVerbose(True) PatchStats(total, fixed, unfixed).print() print(f\"Average time: {avg_time}s\") print(f\"Position", "s in all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f: pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL) times", "PatchStats(total, fixed, unfixed).print() print(f\"Average time: {avg_time}s\") print(f\"Position distribution: {position_dist}\") print(f\"Rule distribution: {rule_dist}\") if", "= {} for p in verified_patches: if p.position not in position_dist: position_dist[p.position] =", "in position_dist: position_dist[p.position] = 0 position_dist[p.position] += 1 if p.rule not in rule_dist:", "as f: pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda it: it.time, all_stats)) avg_time =", "dfp_main import patch, PatchStats, setVerbose TEST_SET_PATH = \"testSet\" # How many test files", "import Path from datetime import datetime from tqdm import tqdm from dfp_main import", "quiet=True) all_stats.append(stats) for s in all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f: pickle.dump(all_stats,", "avg_time = sum(times) / len(times) total = sum(map(lambda it: it.total, all_stats)) fixed =", "fixed = sum(map(lambda it: it.fixed, all_stats)) unfixed = sum(map(lambda it: it.unfixed, all_stats)) verified_patches", "p in verified_patches: if p.position not in position_dist: position_dist[p.position] = 0 position_dist[p.position] +=", "setVerbose(True) PatchStats(total, fixed, unfixed).print() print(f\"Average time: {avg_time}s\") print(f\"Position distribution: {position_dist}\") print(f\"Rule distribution: {rule_dist}\")", "pathlib import Path from datetime import datetime 
from tqdm import tqdm from dfp_main", "datetime from tqdm import tqdm from dfp_main import patch, PatchStats, setVerbose TEST_SET_PATH =", "all_stats)) unfixed = sum(map(lambda it: it.unfixed, all_stats)) verified_patches = [p for stat in", "from datetime import datetime from tqdm import tqdm from dfp_main import patch, PatchStats,", "= sum(times) / len(times) total = sum(map(lambda it: it.total, all_stats)) fixed = sum(map(lambda", "patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats) for s in all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\")", "for p in verified_patches: if p.position not in position_dist: position_dist[p.position] = 0 position_dist[p.position]", "2)] all_stats = [] for dockerfile, violationFile in tqdm(testPairs[:LIMIT]): stats = patch(str(dockerfile), str(violationFile),", "in tqdm(testPairs[:LIMIT]): stats = patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats) for s in all_stats:", "for stat in all_stats for p in stat.patches] position_dist = {} rule_dist =", "f, protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda it: it.time, all_stats)) avg_time = sum(times) / len(times)", "= \"testSet\" # How many test files should be examined [0,100] LIMIT =", "all_stats = [] for dockerfile, violationFile in tqdm(testPairs[:LIMIT]): stats = patch(str(dockerfile), str(violationFile), \"hadolint.exe\",", "\"hadolint.exe\", quiet=True) all_stats.append(stats) for s in all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f:", "1 setVerbose(True) PatchStats(total, fixed, unfixed).print() print(f\"Average time: {avg_time}s\") print(f\"Position distribution: {position_dist}\") print(f\"Rule distribution:", "= sum(map(lambda it: it.unfixed, all_stats)) verified_patches = [p for stat in all_stats for", "str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats) 
for s in all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as", "it: it.unfixed, all_stats)) verified_patches = [p for stat in all_stats for p in", "= [p for stat in all_stats for p in stat.patches] position_dist = {}", "in all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f: pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL) times =", "not in position_dist: position_dist[p.position] = 0 position_dist[p.position] += 1 if p.rule not in", "not in rule_dist: rule_dist[p.rule] = 0 rule_dist[p.rule] += 1 setVerbose(True) PatchStats(total, fixed, unfixed).print()", "def evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i], testFiles[i + 1]) for i", "1]) for i in range(0, len(testFiles), 2)] all_stats = [] for dockerfile, violationFile", "times = list(map(lambda it: it.time, all_stats)) avg_time = sum(times) / len(times) total =", "sum(times) / len(times) total = sum(map(lambda it: it.total, all_stats)) fixed = sum(map(lambda it:", "fixed, unfixed).print() print(f\"Average time: {avg_time}s\") print(f\"Position distribution: {position_dist}\") print(f\"Rule distribution: {rule_dist}\") if __name__", "1 if p.rule not in rule_dist: rule_dist[p.rule] = 0 rule_dist[p.rule] += 1 setVerbose(True)", "unfixed).print() print(f\"Average time: {avg_time}s\") print(f\"Position distribution: {position_dist}\") print(f\"Rule distribution: {rule_dist}\") if __name__ ==", "0 rule_dist[p.rule] += 1 setVerbose(True) PatchStats(total, fixed, unfixed).print() print(f\"Average time: {avg_time}s\") print(f\"Position distribution:", "stat in all_stats for p in stat.patches] position_dist = {} rule_dist = {}", "for i in range(0, len(testFiles), 2)] all_stats = [] for dockerfile, violationFile in", "total = sum(map(lambda it: it.total, all_stats)) fixed = sum(map(lambda it: it.fixed, all_stats)) unfixed", "f: pickle.dump(all_stats, f, 
protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda it: it.time, all_stats)) avg_time = sum(times)", "time: {avg_time}s\") print(f\"Position distribution: {position_dist}\") print(f\"Rule distribution: {rule_dist}\") if __name__ == \"__main__\": evaluateTestSet()", "testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i], testFiles[i + 1]) for i in range(0,", "tqdm import tqdm from dfp_main import patch, PatchStats, setVerbose TEST_SET_PATH = \"testSet\" #", "verified_patches = [p for stat in all_stats for p in stat.patches] position_dist =", "all_stats.append(stats) for s in all_stats: print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f: pickle.dump(all_stats, f,", "pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda it: it.time, all_stats)) avg_time = sum(times) /", "position_dist = {} rule_dist = {} for p in verified_patches: if p.position not", "it.total, all_stats)) fixed = sum(map(lambda it: it.fixed, all_stats)) unfixed = sum(map(lambda it: it.unfixed,", "should be examined [0,100] LIMIT = None def evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs", "None def evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i], testFiles[i + 1]) for", "print(s) with open(f\"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl\", \"wb\") as f: pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda it:", "import pickle from pathlib import Path from datetime import datetime from tqdm import", "position_dist: position_dist[p.position] = 0 position_dist[p.position] += 1 if p.rule not in rule_dist: rule_dist[p.rule]", "it: it.time, all_stats)) avg_time = sum(times) / len(times) total = sum(map(lambda it: it.total,", "import patch, PatchStats, setVerbose TEST_SET_PATH = \"testSet\" # How many test files should", "in rule_dist: rule_dist[p.rule] = 0 rule_dist[p.rule] += 1 setVerbose(True) 
PatchStats(total, fixed, unfixed).print() print(f\"Average", "protocol=pickle.HIGHEST_PROTOCOL) times = list(map(lambda it: it.time, all_stats)) avg_time = sum(times) / len(times) total", "= None def evaluateTestSet(): testFiles = list(Path(TEST_SET_PATH).iterdir()) testPairs = [(testFiles[i], testFiles[i + 1])", "all_stats for p in stat.patches] position_dist = {} rule_dist = {} for p", "for dockerfile, violationFile in tqdm(testPairs[:LIMIT]): stats = patch(str(dockerfile), str(violationFile), \"hadolint.exe\", quiet=True) all_stats.append(stats) for", "rule_dist[p.rule] = 0 rule_dist[p.rule] += 1 setVerbose(True) PatchStats(total, fixed, unfixed).print() print(f\"Average time: {avg_time}s\")" ]
[ "Generator from conans.paths import CONANENV class ConanEnvGenerator(Generator): @property def filename(self): return CONANENV @property", "CONANENV class ConanEnvGenerator(Generator): @property def filename(self): return CONANENV @property def content(self): return self.deps_env_info.dumps()", "import CONANENV class ConanEnvGenerator(Generator): @property def filename(self): return CONANENV @property def content(self): return", "conans.paths import CONANENV class ConanEnvGenerator(Generator): @property def filename(self): return CONANENV @property def content(self):", "conans.model import Generator from conans.paths import CONANENV class ConanEnvGenerator(Generator): @property def filename(self): return", "import Generator from conans.paths import CONANENV class ConanEnvGenerator(Generator): @property def filename(self): return CONANENV", "from conans.model import Generator from conans.paths import CONANENV class ConanEnvGenerator(Generator): @property def filename(self):", "from conans.paths import CONANENV class ConanEnvGenerator(Generator): @property def filename(self): return CONANENV @property def" ]
[ "<reponame>rhiggins2308/G00364712-problemSet<filename>Classwork/format.py for i in range(1, 11): print('{:2d} {:3d} {:4d} {:5d}'.format(i, i**2, i**3, i**4))" ]
[ "configure(self, configs, is_key): pass def close(self): pass class BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic: str,", "from .deserializer import Deserializer from .serializer import Serializer class BytesSerializer(Serializer[bytes]): def serialize(self, topic:", "def configure(self, configs, is_key): pass def close(self): pass class BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic:", "bytes: return data def configure(self, configs, is_key): pass def close(self): pass class BytesDeserializer(Deserializer[bytes]):", "-> bytes: return data def configure(self, configs, is_key): pass def close(self): pass class", "def deserialize(self, topic: str, data: bytes) -> bytes: return data def configure(self, configs,", "def serialize(self, topic: str, data: bytes) -> bytes: return data def configure(self, configs,", "str, data: bytes) -> bytes: return data def configure(self, configs, is_key): pass def", "BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic: str, data: bytes) -> bytes: return data def configure(self,", "close(self): pass class BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic: str, data: bytes) -> bytes: return", "<reponame>newellp2019/pyconfluent from .deserializer import Deserializer from .serializer import Serializer class BytesSerializer(Serializer[bytes]): def serialize(self,", "BytesSerializer(Serializer[bytes]): def serialize(self, topic: str, data: bytes) -> bytes: return data def configure(self,", ".deserializer import Deserializer from .serializer import Serializer class BytesSerializer(Serializer[bytes]): def serialize(self, topic: str,", "pass def close(self): pass class BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic: str, data: bytes) ->", "def close(self): pass class BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic: str, data: bytes) -> bytes:", "data: bytes) -> bytes: return data def configure(self, configs, is_key): pass def 
close(self):", "Deserializer from .serializer import Serializer class BytesSerializer(Serializer[bytes]): def serialize(self, topic: str, data: bytes)", "class BytesSerializer(Serializer[bytes]): def serialize(self, topic: str, data: bytes) -> bytes: return data def", "bytes) -> bytes: return data def configure(self, configs, is_key): pass def close(self): pass", "topic: str, data: bytes) -> bytes: return data def configure(self, configs, is_key): pass", ".serializer import Serializer class BytesSerializer(Serializer[bytes]): def serialize(self, topic: str, data: bytes) -> bytes:", "data def configure(self, configs, is_key): pass def close(self): pass class BytesDeserializer(Deserializer[bytes]): def deserialize(self,", "from .serializer import Serializer class BytesSerializer(Serializer[bytes]): def serialize(self, topic: str, data: bytes) ->", "is_key): pass def close(self): pass class BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic: str, data: bytes)", "class BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic: str, data: bytes) -> bytes: return data def", "deserialize(self, topic: str, data: bytes) -> bytes: return data def configure(self, configs, is_key):", "return data def configure(self, configs, is_key): pass def close(self): pass class BytesDeserializer(Deserializer[bytes]): def", "configs, is_key): pass def close(self): pass class BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic: str, data:", "serialize(self, topic: str, data: bytes) -> bytes: return data def configure(self, configs, is_key):", "import Serializer class BytesSerializer(Serializer[bytes]): def serialize(self, topic: str, data: bytes) -> bytes: return", "pass class BytesDeserializer(Deserializer[bytes]): def deserialize(self, topic: str, data: bytes) -> bytes: return data", "Serializer class BytesSerializer(Serializer[bytes]): def serialize(self, topic: str, data: bytes) -> bytes: return data", "import Deserializer from .serializer 
import Serializer class BytesSerializer(Serializer[bytes]): def serialize(self, topic: str, data:" ]
[ "django.apps import AppConfig from .signal_handlers import install_signal_handlers class KulkunenConfig(AppConfig): name = 'kulkunen' verbose_name", "from django.apps import AppConfig from .signal_handlers import install_signal_handlers class KulkunenConfig(AppConfig): name = 'kulkunen'", "from .signal_handlers import install_signal_handlers class KulkunenConfig(AppConfig): name = 'kulkunen' verbose_name = 'Kulkunen' def", "import AppConfig from .signal_handlers import install_signal_handlers class KulkunenConfig(AppConfig): name = 'kulkunen' verbose_name =", ".signal_handlers import install_signal_handlers class KulkunenConfig(AppConfig): name = 'kulkunen' verbose_name = 'Kulkunen' def ready(self):", "import install_signal_handlers class KulkunenConfig(AppConfig): name = 'kulkunen' verbose_name = 'Kulkunen' def ready(self): install_signal_handlers()", "AppConfig from .signal_handlers import install_signal_handlers class KulkunenConfig(AppConfig): name = 'kulkunen' verbose_name = 'Kulkunen'" ]
[ "radius of the circle : \") r=float(X) a=3.14*r**2 print(\"area of circle : \",a)", "the radius of the circle : \") r=float(X) a=3.14*r**2 print(\"area of circle :", "X=input(\"input the radius of the circle : \") r=float(X) a=3.14*r**2 print(\"area of circle", "<reponame>Rugvedkaikamwar/result X=input(\"input the radius of the circle : \") r=float(X) a=3.14*r**2 print(\"area of" ]
[ "$refs in the schema, should open two files. schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called()", "cache should only open the asked-for file schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]])", "cached root.json as well; call_count stays the same schema_cache[\"root.json\"] assert mocked_open.call_count == 2", "\"__missing__\") schema_cache = SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) calls", "the schema, should open two files. schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count", "mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def", "should open two files. schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2", "mocked_open.call_count == 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving cache should only open the asked-for", "assert mocked_open.call_count == 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving cache should only open the", "SchemaCache() assert schema_cache is not None mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\")", "2 # Request the same schema again; call_count stays the same. 
schema_cache[\"probe-event.json\"] assert", "# Resolving should have cached root.json as well; call_count stays the same schema_cache[\"root.json\"]", "schema_cache is not None mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache =", "side_effect=open) calls = [ call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ] # Defaults", "import SCHEMATA_PATH from uptimer.events.cache import SchemaCache def test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache", "schema_cache = SchemaCache() assert schema_cache is not None mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open =", "the asked-for file schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open =", "SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) schema_cache = SchemaCache()", "mocked_open.assert_called() assert mocked_open.call_count == 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving cache should only open", "Request the same schema again; call_count stays the same. schema_cache[\"probe-event.json\"] assert mocked_open.call_count ==", "None mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\")", "open two files. 
schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 mocked_open.assert_has_calls(calls)", "schema_cache[\"probe-event.json\"] assert mocked_open.call_count == 2 # Resolving should have cached root.json as well;", "= SchemaCache() assert schema_cache is not None mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache,", "[ call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ] # Defaults to resolve $refs", "mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open)", "= SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 # Request the same schema", "def test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count", "2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving cache should only open the asked-for file schema_cache_non_resolving", "mocked_open = mocker.patch(\"builtins.open\", side_effect=open) schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2", "from unittest.mock import call from uptimer.events import SCHEMATA_PATH from uptimer.events.cache import SchemaCache def", "schema_cache = SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) calls =", "import SchemaCache def 
test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() assert schema_cache", "= SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving cache", "open the asked-for file schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open", "assert schema_cache is not None mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache", "schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving", "= [ call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ] # Defaults to resolve", "mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving cache should only open the asked-for file schema_cache_non_resolving =", "mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open =", "\"__missing__\") schema_cache = SchemaCache() assert schema_cache is not None mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open", "again; call_count stays the same. schema_cache[\"probe-event.json\"] assert mocked_open.call_count == 2 # Resolving should", "same schema again; call_count stays the same. 
schema_cache[\"probe-event.json\"] assert mocked_open.call_count == 2 #", "uptimer.events.cache import SchemaCache def test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() assert", "test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open", "schema, should open two files. schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count ==", "= mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() assert schema_cache is not None mocked_open.assert_not_called() def", "mocked_open = mocker.patch(\"builtins.open\", side_effect=open) calls = [ call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"),", "import call from uptimer.events import SCHEMATA_PATH from uptimer.events.cache import SchemaCache def test_schemacache_init(mocker): mocked_open", "2 # Resolving should have cached root.json as well; call_count stays the same", "SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) calls = [ call(path.join(SCHEMATA_PATH,", "def test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() assert schema_cache is not", "= SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) calls = [", "\"root.json\"), \"r\"), ] # Defaults to resolve $refs in the schema, should open", "schema again; call_count 
stays the same. schema_cache[\"probe-event.json\"] assert mocked_open.call_count == 2 # Resolving", "have cached root.json as well; call_count stays the same schema_cache[\"root.json\"] assert mocked_open.call_count ==", "from uptimer.events.cache import SchemaCache def test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache()", "the same. schema_cache[\"probe-event.json\"] assert mocked_open.call_count == 2 # Resolving should have cached root.json", "call_count stays the same. schema_cache[\"probe-event.json\"] assert mocked_open.call_count == 2 # Resolving should have", "def test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) calls = [ call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH,", "assert mocked_open.call_count == 2 # Request the same schema again; call_count stays the", "] # Defaults to resolve $refs in the schema, should open two files.", "= SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) schema_cache =", "from uptimer.events import SCHEMATA_PATH from uptimer.events.cache import SchemaCache def test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache,", "files. 
schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() #", "mocker.patch(\"builtins.open\", side_effect=open) schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 # Request", "schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 # Request the same schema again; call_count", "uptimer.events import SCHEMATA_PATH from uptimer.events.cache import SchemaCache def test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\")", "SCHEMATA_PATH from uptimer.events.cache import SchemaCache def test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache =", "in the schema, should open two files. schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert", "should only open the asked-for file schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def", "the same schema again; call_count stays the same. 
schema_cache[\"probe-event.json\"] assert mocked_open.call_count == 2", "should have cached root.json as well; call_count stays the same schema_cache[\"root.json\"] assert mocked_open.call_count", "assert mocked_open.call_count == 2 # Resolving should have cached root.json as well; call_count", "SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving cache should", "SchemaCache def test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() assert schema_cache is", "import path from unittest.mock import call from uptimer.events import SCHEMATA_PATH from uptimer.events.cache import", "not None mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() schema_cache[\"root.json\"]", "schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) calls = [ call(path.join(SCHEMATA_PATH, \"probe-event.json\"),", "schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 # Request the same", "mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called()", "os import path from unittest.mock import call from uptimer.events import SCHEMATA_PATH from uptimer.events.cache", "to resolve $refs in the schema, should open two files. schema_cache = SchemaCache()", "resolve $refs in the schema, should open two files. 
schema_cache = SchemaCache() schema_cache[\"probe-event.json\"]", "mocked_open.assert_called() assert mocked_open.call_count == 2 # Request the same schema again; call_count stays", "def test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker):", "mocked_open.call_count == 2 # Request the same schema again; call_count stays the same.", "\"r\"), ] # Defaults to resolve $refs in the schema, should open two", "# Request the same schema again; call_count stays the same. schema_cache[\"probe-event.json\"] assert mocked_open.call_count", "schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) schema_cache = SchemaCache() schema_cache[\"probe-event.json\"]", "mocker.patch(\"builtins.open\", side_effect=open) calls = [ call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ] #", "asked-for file schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\",", "mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() assert schema_cache is not None mocked_open.assert_not_called() def test_loading_missing_schema(mocker):", "only open the asked-for file schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker):", "calls = [ call(path.join(SCHEMATA_PATH, 
\"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ] # Defaults to", "unittest.mock import call from uptimer.events import SCHEMATA_PATH from uptimer.events.cache import SchemaCache def test_schemacache_init(mocker):", "file schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open)", "side_effect=open) schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 # Request the", "= mocker.patch(\"builtins.open\", side_effect=open) schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 #", "call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ] # Defaults to resolve $refs in the schema, should", "test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count ==", "two files. 
schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock()", "schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once() mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) schema_cache", "call from uptimer.events import SCHEMATA_PATH from uptimer.events.cache import SchemaCache def test_schemacache_init(mocker): mocked_open =", "= mocker.patch(\"builtins.open\", side_effect=open) calls = [ call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ]", "== 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving cache should only open the asked-for file", "Resolving should have cached root.json as well; call_count stays the same schema_cache[\"root.json\"] assert", "test_schemacache_init(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() assert schema_cache is not None", "# Defaults to resolve $refs in the schema, should open two files. schema_cache", "== 2 # Request the same schema again; call_count stays the same. 
schema_cache[\"probe-event.json\"]", "\"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ] # Defaults to resolve $refs in the", "mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) calls = [ call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"),", "call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ] # Defaults to resolve $refs in", "path from unittest.mock import call from uptimer.events import SCHEMATA_PATH from uptimer.events.cache import SchemaCache", "mocked_open.assert_has_calls([calls[0]]) def test_return_cached_result(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) schema_cache = SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert", "== 2 # Resolving should have cached root.json as well; call_count stays the", "Defaults to resolve $refs in the schema, should open two files. schema_cache =", "SchemaCache() schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 # Request the same schema again;", "# Non-resolving cache should only open the asked-for file schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"]", "same. schema_cache[\"probe-event.json\"] assert mocked_open.call_count == 2 # Resolving should have cached root.json as", "stays the same. 
schema_cache[\"probe-event.json\"] assert mocked_open.call_count == 2 # Resolving should have cached", "\"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"), \"r\"), ] # Defaults to resolve $refs in the schema,", "test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\", side_effect=open) calls = [ call(path.join(SCHEMATA_PATH, \"probe-event.json\"), \"r\"), call(path.join(SCHEMATA_PATH, \"root.json\"),", "mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() assert schema_cache is not None mocked_open.assert_not_called()", "Non-resolving cache should only open the asked-for file schema_cache_non_resolving = SchemaCache(resolve_refs=False) schema_cache_non_resolving[\"probe-event.json\"] mocked_open.assert_called_once()", "mocked_open.call_count == 2 # Resolving should have cached root.json as well; call_count stays", "from os import path from unittest.mock import call from uptimer.events import SCHEMATA_PATH from", "= mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache() schema_cache[\"root.json\"] mocked_open.assert_called_once_with(\"root.json\") def test_loading_dependant_of_root_json(mocker): mocked_open = mocker.patch(\"builtins.open\",", "is not None mocked_open.assert_not_called() def test_loading_missing_schema(mocker): mocked_open = mocker.patch.object(SchemaCache, \"__missing__\") schema_cache = SchemaCache()", "schema_cache[\"probe-event.json\"] mocked_open.assert_called() assert mocked_open.call_count == 2 mocked_open.assert_has_calls(calls) mocked_open.reset_mock() # Non-resolving cache should only", "mocked_open.reset_mock() # Non-resolving cache should only open the asked-for file schema_cache_non_resolving = SchemaCache(resolve_refs=False)" ]
[ "import Optional def get_task_id() -> Optional[str]: try: return os.getenv('CRAWLAB_TASK_ID') except Exception: return None", "<reponame>twinsant/crawlab-sdk import os from typing import Optional def get_task_id() -> Optional[str]: try: return", "os from typing import Optional def get_task_id() -> Optional[str]: try: return os.getenv('CRAWLAB_TASK_ID') except", "typing import Optional def get_task_id() -> Optional[str]: try: return os.getenv('CRAWLAB_TASK_ID') except Exception: return", "from typing import Optional def get_task_id() -> Optional[str]: try: return os.getenv('CRAWLAB_TASK_ID') except Exception:", "import os from typing import Optional def get_task_id() -> Optional[str]: try: return os.getenv('CRAWLAB_TASK_ID')" ]
[ "} }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'standard' },", "only works when scrapy-mode is 'update' argparser.add_argument( '--expire-time', nargs = 1, type =", "'console': { 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'standard' }, 'file': { 'class': 'logging.FileHandler',", "sub-directory os.chdir('images') logger.debug('changed working directory to images.') artworks = os.listdir('.') for artwork in", "'__main__': # parse arguments from command line arguments = parse_arguments() # configure logger", "not exists if not util.create_images_directory(): exit(-1) # set signal handler signal.signal(signal.SIGINT, signal_handler) #", "= scraper.scrapy_pending_url() if artwork: # extend added time artwork['Added'] = util.get_current_time() information =", "to perform integrity check if not arguments.skip_check: if scrapy_mode == 'default': check_and_fix_artworks(db, scraper)", "attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: # update added time and set ID", "check in update mode.') else: logger.info('skipped integrity check.') # main body if scrapy_mode", "as login status' ) # base-url - sub-url scraper to replace with default", "default = ['default'], choices = ['default', 'update'], help = 'sets scrapying mode, default:", "= 1, default = ['default'], choices = ['default', 'update'], help = 'sets scrapying", "open('scraper.cache', 'rb') as temp: scraper = pickle.load(temp) logger.info('continued with last scrapying progress, with", "logging import logging.config def signal_handler(signum, frame): # exit signal received, use pickle to", "scraper) logger.info('integrity check completed.') else: logger.info('will not perform integrity check in update mode.')", "round.') elif scrapy_mode == 'update': # get expired artwork IDs from database expired_artwork_ids", "configured.') return logger def check_and_fix_artworks(db, scraper): \"\"\" Integrity 
check step. Traverse through database", "at the very first of program. Args: console_log_level - console log level, while", "else: cookies = {} if arguments.cookies: # load provided cookies from file cookies", "logger.info('updated artwork information: %s' % information) # replace record in database db.insert_or_replace_artwork(artwork) logger.info('completed", "line arguments = parse_arguments() # configure logger log_level = arguments.log_level[0].upper() logger = config_logger(log_level)", "them from database, and add there urls to scraper's scrapying queue. ONLY works", "sub-url scraper to replace with default '/', must be a valid sub-url defined", "progress to scraper.cache.') exit(0) def parse_arguments(): \"\"\" Parse arguments from commandline. Args: None", "integrity check.') # main body if scrapy_mode == 'default': while True: # scrapy", "artwork_id in expired_artwork_ids: # try to artwork attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork:", "load *manually* because pickle will NOT save class variable scrapy.Scraper.SCRAPIED_BASE = True #", "check_and_fix_artworks(db, scraper): \"\"\" Integrity check step. Traverse through database and see if for", "to replace with default '/', must be a valid sub-url defined in constant.py", "import logging import logging.config def signal_handler(signum, frame): # exit signal received, use pickle", "# scrapy-interval - int ,set scraper's sleep interval between two requests argparser.add_argument( '-i',", "temp) logger.info('successfully saved scrapying progress to scraper.cache.') exit(0) def parse_arguments(): \"\"\" Parse arguments", "the user cookies(json format file) to be used, needed if you want to", "when specified, skip integrity check step argparser.add_argument( '--skip-check', action='store_true', help = 'skip integrity", "ID: %u.' 
% artwork.get('ID')) else: logger.info('didn\\'t scrapy artwork in current round.') elif scrapy_mode", "scrape as login status' ) # base-url - sub-url scraper to replace with", "urls and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented for potiential", "1, 'formatters': { 'standard': { 'format': '%(asctime)s - [%(levelname)s] %(message)s' } }, 'handlers':", "constant.py argparser.add_argument( '--begin-url', nargs = 1, help = 'begin sub-URL to replace default", "'error', 'fatal' # default is info, set the console log level argparser.add_argument( '--log-level',", "images.') artworks = os.listdir('.') for artwork in artworks: if os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0])", "= {} if arguments.cookies: # load provided cookies from file cookies = util.get_cookies(arguments.cookies[0])", "and scraper db = database.Database('fa_scraper.db') if util.if_cache_exists(): # trying to load scraper from", "config_logger(console_log_level): \"\"\" Configure logger, should be called at the very first of program.", "progress, with %u scrapied urls and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) #", "def parse_arguments(): \"\"\" Parse arguments from commandline. Args: None Returns: arguments - arguments", "'--skip-check', action='store_true', help = 'skip integrity check(ONLY works in default mode) between database", "Integrity check step. Traverse through database and see if for each artwork, there", "mode. Args: db - database instance scraper - scraper instance \"\"\" # get", "['console', 'file'], 'level': 'DEBUG', 'propagate': True } } } config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config)", "choices = ['default', 'update'], help = 'sets scrapying mode, default: default' ) #", "for each artwork, there exists a corresponding image in images sub-directory. 
If there", "# base-url - sub-url scraper to replace with default '/', must be a", "skip integrity check step argparser.add_argument( '--skip-check', action='store_true', help = 'skip integrity check(ONLY works", "database, and add there urls to scraper's scrapying queue. ONLY works in default", "argparser.add_argument( '--begin-url', nargs = 1, help = 'begin sub-URL to replace default \"/\",", "cen be choosen from 'debug', 'info', 'warning', 'error', 'fatal' # default is info,", "check step. Traverse through database and see if for each artwork, there exists", "artwork_id in artwork_ids: artwork_ids.remove(artwork_id) # remove remaining artwork records from database db.delete_artworks(artwork_ids) #", "'--log-level', nargs = 1, default = ['info'], choices = ['debug', 'info', 'warning', 'error',", "= int(os.path.splitext(os.path.basename(artwork))[0]) # if exists image named 'artwork ID', remove it from set", "[%(levelname)s] %(message)s' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter':", "'images' sub-directory os.chdir('images') logger.debug('changed working directory to images.') artworks = os.listdir('.') for artwork", "'class': 'logging.FileHandler', 'filename': 'fa_scraper.log', 'level': 'DEBUG', 'formatter': 'standard' } }, 'loggers': { 'default':", "check completed.') else: logger.info('will not perform integrity check in update mode.') else: logger.info('skipped", "sub-url defined in constant.py argparser.add_argument( '--begin-url', nargs = 1, help = 'begin sub-URL", "help = 'skip integrity check(ONLY works in default mode) between database and images'", "'%(asctime)s - [%(levelname)s] %(message)s' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'level':", "\"\"\" config = { 'version': 1, 'formatters': { 'standard': { 'format': '%(asctime)s -", "logger = logging.getLogger('default') logger.info('set console log level to %s' % console_log_level) logger.debug('logger configured.')", 
"scrapy-mode - can be choosen from 'default', 'update' # default is 'default', set", "logger.info('integrity check completed.') else: logger.info('will not perform integrity check in update mode.') else:", "== 'update': # get expired artwork IDs from database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved", "arguments = parse_arguments() # configure logger log_level = arguments.log_level[0].upper() logger = config_logger(log_level) #", "to images.') artworks = os.listdir('.') for artwork in artworks: if os.path.isfile(artwork): artwork_id =", "sys import os import signal import pickle import json import logging import logging.config", "logger.debug('changed working directory to images.') artworks = os.listdir('.') for artwork in artworks: if", "logger.info('retrieved all expired artwork IDs.') for artwork_id in expired_artwork_ids: # try to artwork", "scraper logger.info('exit signal received, saving scrapying progress...') logger.info('current scraper with %u urls scrapied,", "logger.info('successfully saved scrapying progress to scraper.cache.') exit(0) def parse_arguments(): \"\"\" Parse arguments from", "Args: console_log_level - console log level, while log file level is fixed to", ") arguments = argparser.parse_args() return arguments def config_logger(console_log_level): \"\"\" Configure logger, should be", "in current round.') elif scrapy_mode == 'update': # get expired artwork IDs from", "signal handler signal.signal(signal.SIGINT, signal_handler) # initialize database and scraper db = database.Database('fa_scraper.db') if", "urls scrapied, and %u scrapying urls.' 
% (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache', 'wb') as", "database and images' ) # log-level - cen be choosen from 'debug', 'info',", "integrity check in update mode.') else: logger.info('skipped integrity check.') # main body if", "= 'specify the user cookies(json format file) to be used, needed if you", "If there are artworks missing, remove them from database, and add there urls", "update added time and set ID artwork['ID'] = artwork_id artwork['Added'] = util.get_current_time() information", "None Returns: arguments - arguments parsed from command line \"\"\" argparser = argparse.ArgumentParser(", "logger.debug('logger configured.') return logger def check_and_fix_artworks(db, scraper): \"\"\" Integrity check step. Traverse through", "potiential error # fix Scraper lazy load *manually* because pickle will NOT save", "% artwork.get('ID')) else: logger.info('didn\\'t scrapy artwork in current round.') elif scrapy_mode == 'update':", ") # cookies - filename, use cookies(json) provided to scrape as logined argparser.add_argument(", "level is fixed to debug \"\"\" config = { 'version': 1, 'formatters': {", "with python.' ) # scrapy-mode - can be choosen from 'default', 'update' #", "'/', must be a valid sub-url defined in constant.py argparser.add_argument( '--begin-url', nargs =", "and images' ) # log-level - cen be choosen from 'debug', 'info', 'warning',", "artwork_id artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('updated artwork information: %s' % information)", "scrapy artwork with ID: %u.' % artwork.get('ID')) else: logger.info('didn\\'t scrapy artwork in current", "%u.' 
% artwork.get('ID')) else: logger.info('didn\\'t scrapy artwork in current round.') elif scrapy_mode ==", "class variable scrapy.Scraper.SCRAPIED_BASE = True # reset scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0] else: cookies", "current round.') elif scrapy_mode == 'update': # get expired artwork IDs from database", "return arguments def config_logger(console_log_level): \"\"\" Configure logger, should be called at the very", "signal_handler(signum, frame): # exit signal received, use pickle to dump scraper logger.info('exit signal", "scrapy-mode is 'update' argparser.add_argument( '--expire-time', nargs = 1, type = int, default =", "mode argparser.add_argument( '-m', '--scrapy-mode', nargs = 1, default = ['default'], choices = ['default',", "= scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization completed.') scrapy_mode = arguments.scrapy_mode[0] logger.info('scrapy mode set to", "argparser.add_argument( '--log-level', nargs = 1, default = ['info'], choices = ['debug', 'info', 'warning',", "'sets verbosity level for console log messages, default: info' ) arguments = argparser.parse_args()", "import logging.config def signal_handler(signum, frame): # exit signal received, use pickle to dump", "argparser.add_argument( '--skip-check', action='store_true', help = 'skip integrity check(ONLY works in default mode) between", "command line arguments = parse_arguments() # configure logger log_level = arguments.log_level[0].upper() logger =", "urls and add to scrapying queue unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed", "to scrapy artwork with ID: %u.' 
% artwork.get('ID')) else: logger.info('didn\\'t scrapy artwork in", "default = [15], help = 'sets expire time(days) for scrapied images, default: 15'", "commented for potiential error # fix Scraper lazy load *manually* because pickle will", "= os.listdir('.') for artwork in artworks: if os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) # if", "import pickle import json import logging import logging.config def signal_handler(signum, frame): # exit", "'formatters': { 'standard': { 'format': '%(asctime)s - [%(levelname)s] %(message)s' } }, 'handlers': {", "'sets scrapying mode, default: default' ) # expire-time - int, set expire time", "images sub-directory. If there are artworks missing, remove them from database, and add", "ONLY works in default mode. Args: db - database instance scraper - scraper", "scraper with %u urls scrapied, and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with", "file level is fixed to debug \"\"\" config = { 'version': 1, 'formatters':", "'DEBUG', 'formatter': 'standard' }, 'file': { 'class': 'logging.FileHandler', 'filename': 'fa_scraper.log', 'level': 'DEBUG', 'formatter':", "initialize database and scraper db = database.Database('fa_scraper.db') if util.if_cache_exists(): # trying to load", "information) # insert into database db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy artwork with ID: %u.'", "'formatter': 'standard' }, 'file': { 'class': 'logging.FileHandler', 'filename': 'fa_scraper.log', 'level': 'DEBUG', 'formatter': 'standard'", "requests, default: 60' ) # cookies - filename, use cookies(json) provided to scrape", "'error', 'fatal'], help = 'sets verbosity level for console log messages, default: info'", "signal import pickle import json import logging import logging.config def signal_handler(signum, frame): #", "added time artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('scrapied artwork 
information: %s' %", "trying to load scraper from scraper.cache with open('scraper.cache', 'rb') as temp: scraper =", "arguments def config_logger(console_log_level): \"\"\" Configure logger, should be called at the very first", "%s' % information) # insert into database db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy artwork with", "to %s' % scrapy_mode) # try to perform integrity check if not arguments.skip_check:", "- console log level, while log file level is fixed to debug \"\"\"", "urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache', 'wb') as temp: pickle.dump(scraper, temp) logger.info('successfully saved", "set ID artwork['ID'] = artwork_id artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('updated artwork", "'default', 'update' # default is 'default', set scrapy mode argparser.add_argument( '-m', '--scrapy-mode', nargs", "use pickle to dump scraper logger.info('exit signal received, saving scrapying progress...') logger.info('current scraper", "- filename, use cookies(json) provided to scrape as logined argparser.add_argument( '-c', '--cookies', nargs", "int, default = [15], help = 'sets expire time(days) for scrapied images, default:", "- int ,set scraper's sleep interval between two requests argparser.add_argument( '-i', '--scrapy-interval', nargs", "arguments.log_level[0].upper() logger = config_logger(log_level) # create images sub-directory if not exists if not", "- cen be choosen from 'debug', 'info', 'warning', 'error', 'fatal' # default is", "scrapy loop # try to get artwork from scraper artwork = scraper.scrapy_pending_url() if", "furaffinity.net written with python.' 
) # scrapy-mode - can be choosen from 'default',", "logging.config def signal_handler(signum, frame): # exit signal received, use pickle to dump scraper", "signal_handler) # initialize database and scraper db = database.Database('fa_scraper.db') if util.if_cache_exists(): # trying", "'A scraper of furaffinity.net written with python.' ) # scrapy-mode - can be", "written with python.' ) # scrapy-mode - can be choosen from 'default', 'update'", "example' ) # skip-check - when specified, skip integrity check step argparser.add_argument( '--skip-check',", "= config_logger(log_level) # create images sub-directory if not exists if not util.create_images_directory(): exit(-1)", "scrapying progress, with %u scrapied urls and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue)))", "= ['info'], choices = ['debug', 'info', 'warning', 'error', 'fatal'], help = 'sets verbosity", "logger.info('scrapied artwork information: %s' % information) # insert into database db.insert_or_replace_artwork(artwork) logger.info('completed to", "(len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented for potiential error # fix Scraper lazy load", "arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization completed.') scrapy_mode = arguments.scrapy_mode[0] logger.info('scrapy mode", "console log level argparser.add_argument( '--log-level', nargs = 1, default = ['info'], choices =", "scrapying queue unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed working directory to origin.')", "help = 'sets scrapying mode, default: default' ) # expire-time - int, set", "argparser.parse_args() return arguments def config_logger(console_log_level): \"\"\" Configure logger, should be called at the", "level to %s' % console_log_level) logger.debug('logger configured.') 
return logger def check_and_fix_artworks(db, scraper): \"\"\"", "set the console log level argparser.add_argument( '--log-level', nargs = 1, default = ['info'],", "= 1, type = int, default = [60], help = 'sets sleep interval(seconds)", "from 'default', 'update' # default is 'default', set scrapy mode argparser.add_argument( '-m', '--scrapy-mode',", "# set signal handler signal.signal(signal.SIGINT, signal_handler) # initialize database and scraper db =", "update mode.') else: logger.info('skipped integrity check.') # main body if scrapy_mode == 'default':", "# main body if scrapy_mode == 'default': while True: # scrapy loop #", "database and scraper db = database.Database('fa_scraper.db') if util.if_cache_exists(): # trying to load scraper", "IDs from artwork, and initialize a set artwork_ids = set(db.get_artwork_ids()) # traverse through", "to scraper's scrapying queue. ONLY works in default mode. Args: db - database", "try to artwork attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: # update added time", "artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('scrapied artwork information: %s' % information) #", "scrapying progress to scraper.cache.') exit(0) def parse_arguments(): \"\"\" Parse arguments from commandline. Args:", "through 'images' sub-directory os.chdir('images') logger.debug('changed working directory to images.') artworks = os.listdir('.') for", "= '%s [OPTIONS]' % sys.argv[0], description = 'A scraper of furaffinity.net written with", "os.listdir('.') for artwork in artworks: if os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) # if exists", "= 'skip integrity check(ONLY works in default mode) between database and images' )", "of furaffinity.net written with python.' 
) # scrapy-mode - can be choosen from", "scrapy artwork in current round.') elif scrapy_mode == 'update': # get expired artwork", "scrape as logined argparser.add_argument( '-c', '--cookies', nargs = 1, help = 'specify the", "scraper of furaffinity.net written with python.' ) # scrapy-mode - can be choosen", "# get all artwork IDs from artwork, and initialize a set artwork_ids =", "skip-check - when specified, skip integrity check step argparser.add_argument( '--skip-check', action='store_true', help =", "dump scraper logger.info('exit signal received, saving scrapying progress...') logger.info('current scraper with %u urls", "= 1, default = ['info'], choices = ['debug', 'info', 'warning', 'error', 'fatal'], help", "queue. ONLY works in default mode. Args: db - database instance scraper -", "= database.Database('fa_scraper.db') if util.if_cache_exists(): # trying to load scraper from scraper.cache with open('scraper.cache',", "arguments from commandline. Args: None Returns: arguments - arguments parsed from command line", "scraper's sleep interval between two requests argparser.add_argument( '-i', '--scrapy-interval', nargs = 1, type", "arguments parsed from command line \"\"\" argparser = argparse.ArgumentParser( usage = '%s [OPTIONS]'", "# exit signal received, use pickle to dump scraper logger.info('exit signal received, saving", "want to scrape as login status' ) # base-url - sub-url scraper to", "is fixed to debug \"\"\" config = { 'version': 1, 'formatters': { 'standard':", "help = 'begin sub-URL to replace default \"/\", \"/user/blackdragonf\" for example' ) #", "}, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'standard' }, 'file':", "scraper artwork = scraper.scrapy_pending_url() if artwork: # extend added time artwork['Added'] = util.get_current_time()", "exists if not util.create_images_directory(): exit(-1) # set signal handler signal.signal(signal.SIGINT, signal_handler) # initialize", "def 
config_logger(console_log_level): \"\"\" Configure logger, should be called at the very first of", "through database and see if for each artwork, there exists a corresponding image", "- when specified, skip integrity check step argparser.add_argument( '--skip-check', action='store_true', help = 'skip", "while log file level is fixed to debug \"\"\" config = { 'version':", "config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config) logger = logging.getLogger('default') logger.info('set console log level to %s'", "traverse through 'images' sub-directory os.chdir('images') logger.debug('changed working directory to images.') artworks = os.listdir('.')", "util.get_cookies(arguments.cookies[0]) begin_url = None if arguments.begin_url: # alternative begin-url specified begin_url = arguments.begin_url[0]", "1, type = int, default = [60], help = 'sets sleep interval(seconds) between", "db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy expired artwork(with ID: %u)\\'s info .' % artwork.get('ID')) db.close_db(conn)", "cookies from file cookies = util.get_cookies(arguments.cookies[0]) begin_url = None if arguments.begin_url: # alternative", "defined in constant.py argparser.add_argument( '--begin-url', nargs = 1, help = 'begin sub-URL to", "for potiential error # fix Scraper lazy load *manually* because pickle will NOT", "# try to perform integrity check if not arguments.skip_check: if scrapy_mode == 'default':", "temp: scraper = pickle.load(temp) logger.info('continued with last scrapying progress, with %u scrapied urls", "from database.' 
% len(artwork_ids)) if __name__ == '__main__': # parse arguments from command", "replace with default '/', must be a valid sub-url defined in constant.py argparser.add_argument(", "two requests argparser.add_argument( '-i', '--scrapy-interval', nargs = 1, type = int, default =", "= { 'version': 1, 'formatters': { 'standard': { 'format': '%(asctime)s - [%(levelname)s] %(message)s'", "origin.') logger.info('%u wrong records removed from database.' % len(artwork_ids)) if __name__ == '__main__':", "\"\"\" # get all artwork IDs from artwork, and initialize a set artwork_ids", "}, 'file': { 'class': 'logging.FileHandler', 'filename': 'fa_scraper.log', 'level': 'DEBUG', 'formatter': 'standard' } },", "# replace record in database db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy expired artwork(with ID: %u)\\'s", "corresponding image in images sub-directory. If there are artworks missing, remove them from", "for console log messages, default: info' ) arguments = argparser.parse_args() return arguments def", "and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented for potiential error", "def signal_handler(signum, frame): # exit signal received, use pickle to dump scraper logger.info('exit", "urls to scraper's scrapying queue. ONLY works in default mode. 
Args: db -", "arguments.begin_url: # alternative begin-url specified begin_url = arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url)", "working directory to images.') artworks = os.listdir('.') for artwork in artworks: if os.path.isfile(artwork):", "= ['debug', 'info', 'warning', 'error', 'fatal'], help = 'sets verbosity level for console", "provided cookies from file cookies = util.get_cookies(arguments.cookies[0]) begin_url = None if arguments.begin_url: #", "True # reset scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0] else: cookies = {} if arguments.cookies:", "'info', 'warning', 'error', 'fatal' # default is info, set the console log level", "'info', 'warning', 'error', 'fatal'], help = 'sets verbosity level for console log messages,", "config = { 'version': 1, 'formatters': { 'standard': { 'format': '%(asctime)s - [%(levelname)s]", "console_log_level - console log level, while log file level is fixed to debug", "remove it from set if artwork_id in artwork_ids: artwork_ids.remove(artwork_id) # remove remaining artwork", "# initialize database and scraper db = database.Database('fa_scraper.db') if util.if_cache_exists(): # trying to", "from database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired artwork IDs.') for artwork_id in", "in constant.py argparser.add_argument( '--begin-url', nargs = 1, help = 'begin sub-URL to replace", "artwork information: %s' % information) # replace record in database db.insert_or_replace_artwork(artwork) logger.info('completed to", "logger.info('initialization completed.') scrapy_mode = arguments.scrapy_mode[0] logger.info('scrapy mode set to %s' % scrapy_mode) #", "= arguments.log_level[0].upper() logger = config_logger(log_level) # create images sub-directory if not exists if", "list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') 
logger.debug('changed working directory to origin.') logger.info('%u wrong records removed", "= 1, help = 'specify the user cookies(json format file) to be used,", "wrong records removed from database.' % len(artwork_ids)) if __name__ == '__main__': # parse", "default mode) between database and images' ) # log-level - cen be choosen", "from artwork, and initialize a set artwork_ids = set(db.get_artwork_ids()) # traverse through 'images'", "action='store_true', help = 'skip integrity check(ONLY works in default mode) between database and", "images, default: 15' ) # scrapy-interval - int ,set scraper's sleep interval between", "artwork with ID: %u.' % artwork.get('ID')) else: logger.info('didn\\'t scrapy artwork in current round.')", "with last scrapying progress, with %u scrapied urls and %u scrapying urls.' %", "fa_scraper import * import argparse import sys import os import signal import pickle", "a set artwork_ids = set(db.get_artwork_ids()) # traverse through 'images' sub-directory os.chdir('images') logger.debug('changed working", "['debug', 'info', 'warning', 'error', 'fatal'], help = 'sets verbosity level for console log", "arguments from command line arguments = parse_arguments() # configure logger log_level = arguments.log_level[0].upper()", "'fatal' # default is info, set the console log level argparser.add_argument( '--log-level', nargs", "os.remove('scraper.cache') commented for potiential error # fix Scraper lazy load *manually* because pickle", "= 1, type = int, default = [15], help = 'sets expire time(days)", "if artwork_id in artwork_ids: artwork_ids.remove(artwork_id) # remove remaining artwork records from database db.delete_artworks(artwork_ids)", "records removed from database.' 
% len(artwork_ids)) if __name__ == '__main__': # parse arguments", "artwork IDs from artwork, and initialize a set artwork_ids = set(db.get_artwork_ids()) # traverse", "remove remaining artwork records from database db.delete_artworks(artwork_ids) # convert artwork IDs to urls", "added time and set ID artwork['ID'] = artwork_id artwork['Added'] = util.get_current_time() information =", "called at the very first of program. Args: console_log_level - console log level,", "logger = config_logger(log_level) # create images sub-directory if not exists if not util.create_images_directory():", "be a valid sub-url defined in constant.py argparser.add_argument( '--begin-url', nargs = 1, help", "arguments - arguments parsed from command line \"\"\" argparser = argparse.ArgumentParser( usage =", "base-url - sub-url scraper to replace with default '/', must be a valid", "set signal handler signal.signal(signal.SIGINT, signal_handler) # initialize database and scraper db = database.Database('fa_scraper.db')", "begin_url = None if arguments.begin_url: # alternative begin-url specified begin_url = arguments.begin_url[0] scraper", "check if not arguments.skip_check: if scrapy_mode == 'default': check_and_fix_artworks(db, scraper) logger.info('integrity check completed.')", "there exists a corresponding image in images sub-directory. If there are artworks missing,", "artwork_ids: artwork_ids.remove(artwork_id) # remove remaining artwork records from database db.delete_artworks(artwork_ids) # convert artwork", "# cookies - filename, use cookies(json) provided to scrape as logined argparser.add_argument( '-c',", "login status' ) # base-url - sub-url scraper to replace with default '/',", "'-i', '--scrapy-interval', nargs = 1, type = int, default = [60], help =", "between two network requests, default: 60' ) # cookies - filename, use cookies(json)", "there urls to scraper's scrapying queue. ONLY works in default mode. 
Args: db", "db - database instance scraper - scraper instance \"\"\" # get all artwork", "file cookies = util.get_cookies(arguments.cookies[0]) begin_url = None if arguments.begin_url: # alternative begin-url specified", "\"\"\" Parse arguments from commandline. Args: None Returns: arguments - arguments parsed from", "of program. Args: console_log_level - console log level, while log file level is", "= util.get_current_time() information = json.dumps(artwork) logger.info('updated artwork information: %s' % information) # replace", "will NOT save class variable scrapy.Scraper.SCRAPIED_BASE = True # reset scrapy_interval scraper.scrapy_interval =", "scrapied urls and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented for", "default mode. Args: db - database instance scraper - scraper instance \"\"\" #", "records from database db.delete_artworks(artwork_ids) # convert artwork IDs to urls and add to", "if you want to scrape as login status' ) # base-url - sub-url", "# get expired artwork IDs from database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired", "valid sub-url defined in constant.py argparser.add_argument( '--begin-url', nargs = 1, help = 'begin", "scrapy_mode) # try to perform integrity check if not arguments.skip_check: if scrapy_mode ==", "from command line arguments = parse_arguments() # configure logger log_level = arguments.log_level[0].upper() logger", "[15], help = 'sets expire time(days) for scrapied images, default: 15' ) #", "'file': { 'class': 'logging.FileHandler', 'filename': 'fa_scraper.log', 'level': 'DEBUG', 'formatter': 'standard' } }, 'loggers':", "set expire time # only works when scrapy-mode is 'update' argparser.add_argument( '--expire-time', nargs", "scraper.cache.') exit(0) def parse_arguments(): \"\"\" Parse arguments from commandline. 
Args: None Returns: arguments", "else: logger.info('didn\\'t scrapy artwork in current round.') elif scrapy_mode == 'update': # get", "open('scraper.cache', 'wb') as temp: pickle.dump(scraper, temp) logger.info('successfully saved scrapying progress to scraper.cache.') exit(0)", "try to get artwork from scraper artwork = scraper.scrapy_pending_url() if artwork: # extend", "= [15], help = 'sets expire time(days) for scrapied images, default: 15' )", "= argparser.parse_args() return arguments def config_logger(console_log_level): \"\"\" Configure logger, should be called at", "type = int, default = [15], help = 'sets expire time(days) for scrapied", "check step argparser.add_argument( '--skip-check', action='store_true', help = 'skip integrity check(ONLY works in default", "filename, use cookies(json) provided to scrape as logined argparser.add_argument( '-c', '--cookies', nargs =", "def check_and_fix_artworks(db, scraper): \"\"\" Integrity check step. Traverse through database and see if", "used, needed if you want to scrape as login status' ) # base-url", "ID', remove it from set if artwork_id in artwork_ids: artwork_ids.remove(artwork_id) # remove remaining", "True: # scrapy loop # try to get artwork from scraper artwork =", "60' ) # cookies - filename, use cookies(json) provided to scrape as logined", "type = int, default = [60], help = 'sets sleep interval(seconds) between two", "scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed working directory to origin.') logger.info('%u wrong records removed from database.'", "begin_url) logger.info('initialization completed.') scrapy_mode = arguments.scrapy_mode[0] logger.info('scrapy mode set to %s' % scrapy_mode)", "directory to origin.') logger.info('%u wrong records removed from database.' 
% len(artwork_ids)) if __name__", "pickle to dump scraper logger.info('exit signal received, saving scrapying progress...') logger.info('current scraper with", "= pickle.load(temp) logger.info('continued with last scrapying progress, with %u scrapied urls and %u", "default' ) # expire-time - int, set expire time # only works when", "default: default' ) # expire-time - int, set expire time # only works", "'standard': { 'format': '%(asctime)s - [%(levelname)s] %(message)s' } }, 'handlers': { 'console': {", "'handlers': ['console', 'file'], 'level': 'DEBUG', 'propagate': True } } } config['handlers']['console']['level'] = console_log_level", "verbosity level for console log messages, default: info' ) arguments = argparser.parse_args() return", "else: logger.info('skipped integrity check.') # main body if scrapy_mode == 'default': while True:", "help = 'sets verbosity level for console log messages, default: info' ) arguments", "from database, and add there urls to scraper's scrapying queue. ONLY works in", "to re-scrapy expired artwork(with ID: %u)\\'s info .' % artwork.get('ID')) db.close_db(conn) logger.info('exiting scraper...')", "'handlers': { 'console': { 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'standard' }, 'file': {", "scraper from scraper.cache with open('scraper.cache', 'rb') as temp: scraper = pickle.load(temp) logger.info('continued with", "%s' % information) # replace record in database db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy expired", "must be a valid sub-url defined in constant.py argparser.add_argument( '--begin-url', nargs = 1,", "image in images sub-directory. 
If there are artworks missing, remove them from database,", "variable scrapy.Scraper.SCRAPIED_BASE = True # reset scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0] else: cookies =", "% (len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented for potiential error # fix Scraper lazy", "is info, set the console log level argparser.add_argument( '--log-level', nargs = 1, default", "if not arguments.skip_check: if scrapy_mode == 'default': check_and_fix_artworks(db, scraper) logger.info('integrity check completed.') else:", "time(days) for scrapied images, default: 15' ) # scrapy-interval - int ,set scraper's", "json.dumps(artwork) logger.info('updated artwork information: %s' % information) # replace record in database db.insert_or_replace_artwork(artwork)", "- sub-url scraper to replace with default '/', must be a valid sub-url", "expired artwork IDs.') for artwork_id in expired_artwork_ids: # try to artwork attributes artwork", "database db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy expired artwork(with ID: %u)\\'s info .' % artwork.get('ID'))", "'-c', '--cookies', nargs = 1, help = 'specify the user cookies(json format file)", "nargs = 1, help = 'begin sub-URL to replace default \"/\", \"/user/blackdragonf\" for", "\"\"\" Configure logger, should be called at the very first of program. Args:", "argparser.add_argument( '-c', '--cookies', nargs = 1, help = 'specify the user cookies(json format", "mode) between database and images' ) # log-level - cen be choosen from", "= db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired artwork IDs.') for artwork_id in expired_artwork_ids: # try", "if for each artwork, there exists a corresponding image in images sub-directory. 
If", "db = database.Database('fa_scraper.db') if util.if_cache_exists(): # trying to load scraper from scraper.cache with", "logger.debug('changed working directory to origin.') logger.info('%u wrong records removed from database.' % len(artwork_ids))", "'fatal'], help = 'sets verbosity level for console log messages, default: info' )", "in artworks: if os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) # if exists image named 'artwork", "provided to scrape as logined argparser.add_argument( '-c', '--cookies', nargs = 1, help =", "logger.info('completed to scrapy artwork with ID: %u.' % artwork.get('ID')) else: logger.info('didn\\'t scrapy artwork", "= None if arguments.begin_url: # alternative begin-url specified begin_url = arguments.begin_url[0] scraper =", "user cookies(json format file) to be used, needed if you want to scrape", "console_log_level logging.config.dictConfig(config) logger = logging.getLogger('default') logger.info('set console log level to %s' % console_log_level)", "if exists image named 'artwork ID', remove it from set if artwork_id in", "received, use pickle to dump scraper logger.info('exit signal received, saving scrapying progress...') logger.info('current", "mode, default: default' ) # expire-time - int, set expire time # only", "information: %s' % information) # insert into database db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy artwork", ") # log-level - cen be choosen from 'debug', 'info', 'warning', 'error', 'fatal'", "%s' % console_log_level) logger.debug('logger configured.') return logger def check_and_fix_artworks(db, scraper): \"\"\" Integrity check", "there are artworks missing, remove them from database, and add there urls to", "from database db.delete_artworks(artwork_ids) # convert artwork IDs to urls and add to scrapying", "'default': while True: # scrapy loop # try to get artwork from scraper", "database instance scraper - scraper instance \"\"\" # get all 
artwork IDs from", "help = 'sets expire time(days) for scrapied images, default: 15' ) # scrapy-interval", "to dump scraper logger.info('exit signal received, saving scrapying progress...') logger.info('current scraper with %u", "argparser.add_argument( '-i', '--scrapy-interval', nargs = 1, type = int, default = [60], help", "% console_log_level) logger.debug('logger configured.') return logger def check_and_fix_artworks(db, scraper): \"\"\" Integrity check step.", "logging.getLogger('default') logger.info('set console log level to %s' % console_log_level) logger.debug('logger configured.') return logger", "sub-directory if not exists if not util.create_images_directory(): exit(-1) # set signal handler signal.signal(signal.SIGINT,", "util.create_images_directory(): exit(-1) # set signal handler signal.signal(signal.SIGINT, signal_handler) # initialize database and scraper", "can be choosen from 'default', 'update' # default is 'default', set scrapy mode", "you want to scrape as login status' ) # base-url - sub-url scraper", "frame): # exit signal received, use pickle to dump scraper logger.info('exit signal received,", "scraper = pickle.load(temp) logger.info('continued with last scrapying progress, with %u scrapied urls and", "'propagate': True } } } config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config) logger = logging.getLogger('default') logger.info('set", "and add there urls to scraper's scrapying queue. ONLY works in default mode.", "alternative begin-url specified begin_url = arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization completed.')", "all expired artwork IDs.') for artwork_id in expired_artwork_ids: # try to artwork attributes", "with %u scrapied urls and %u scrapying urls.' 
% (len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache')", "os.chdir('..') logger.debug('changed working directory to origin.') logger.info('%u wrong records removed from database.' %", "= scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: # update added time and set ID artwork['ID'] =", "os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) # if exists image named 'artwork ID', remove it", "= int, default = [60], help = 'sets sleep interval(seconds) between two network", "os.chdir('images') logger.debug('changed working directory to images.') artworks = os.listdir('.') for artwork in artworks:", "status' ) # base-url - sub-url scraper to replace with default '/', must", "perform integrity check in update mode.') else: logger.info('skipped integrity check.') # main body", "%(message)s' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'standard'", "= artwork_id artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('updated artwork information: %s' %", "'formatter': 'standard' } }, 'loggers': { 'default': { 'handlers': ['console', 'file'], 'level': 'DEBUG',", "# reset scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0] else: cookies = {} if arguments.cookies: #", "artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: # update added time and set ID artwork['ID']", "needed if you want to scrape as login status' ) # base-url -", "begin_url = arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization completed.') scrapy_mode = arguments.scrapy_mode[0]", "time and set ID artwork['ID'] = artwork_id artwork['Added'] = util.get_current_time() information = json.dumps(artwork)", "as temp: scraper = pickle.load(temp) logger.info('continued with last scrapying progress, with %u 
scrapied", "configure logger log_level = arguments.log_level[0].upper() logger = config_logger(log_level) # create images sub-directory if", "import sys import os import signal import pickle import json import logging import", "be choosen from 'debug', 'info', 'warning', 'error', 'fatal' # default is info, set", "check_and_fix_artworks(db, scraper) logger.info('integrity check completed.') else: logger.info('will not perform integrity check in update", "\"\"\" Integrity check step. Traverse through database and see if for each artwork,", "# if exists image named 'artwork ID', remove it from set if artwork_id", "nargs = 1, default = ['default'], choices = ['default', 'update'], help = 'sets", "'skip integrity check(ONLY works in default mode) between database and images' ) #", "network requests, default: 60' ) # cookies - filename, use cookies(json) provided to", "sys.argv[0], description = 'A scraper of furaffinity.net written with python.' ) # scrapy-mode", "if arguments.cookies: # load provided cookies from file cookies = util.get_cookies(arguments.cookies[0]) begin_url =", "scrapying urls.' 
% (len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented for potiential error # fix", "choosen from 'debug', 'info', 'warning', 'error', 'fatal' # default is info, set the", "default: info' ) arguments = argparser.parse_args() return arguments def config_logger(console_log_level): \"\"\" Configure logger,", "save class variable scrapy.Scraper.SCRAPIED_BASE = True # reset scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0] else:", "% scrapy_mode) # try to perform integrity check if not arguments.skip_check: if scrapy_mode", "'sets sleep interval(seconds) between two network requests, default: 60' ) # cookies -", "- arguments parsed from command line \"\"\" argparser = argparse.ArgumentParser( usage = '%s", "line \"\"\" argparser = argparse.ArgumentParser( usage = '%s [OPTIONS]' % sys.argv[0], description =", "cookies(json) provided to scrape as logined argparser.add_argument( '-c', '--cookies', nargs = 1, help", "\"\"\" argparser = argparse.ArgumentParser( usage = '%s [OPTIONS]' % sys.argv[0], description = 'A", "15' ) # scrapy-interval - int ,set scraper's sleep interval between two requests", "working directory to origin.') logger.info('%u wrong records removed from database.' 
% len(artwork_ids)) if", "artwork information: %s' % information) # insert into database db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy", "to be used, needed if you want to scrape as login status' )", "argparse.ArgumentParser( usage = '%s [OPTIONS]' % sys.argv[0], description = 'A scraper of furaffinity.net", "fix Scraper lazy load *manually* because pickle will NOT save class variable scrapy.Scraper.SCRAPIED_BASE", "file) to be used, needed if you want to scrape as login status'", "{ 'format': '%(asctime)s - [%(levelname)s] %(message)s' } }, 'handlers': { 'console': { 'class':", "saved scrapying progress to scraper.cache.') exit(0) def parse_arguments(): \"\"\" Parse arguments from commandline.", "'begin sub-URL to replace default \"/\", \"/user/blackdragonf\" for example' ) # skip-check -", "database db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy artwork with ID: %u.' % artwork.get('ID')) else: logger.info('didn\\'t", "{ 'version': 1, 'formatters': { 'standard': { 'format': '%(asctime)s - [%(levelname)s] %(message)s' }", "works when scrapy-mode is 'update' argparser.add_argument( '--expire-time', nargs = 1, type = int,", "database.Database('fa_scraper.db') if util.if_cache_exists(): # trying to load scraper from scraper.cache with open('scraper.cache', 'rb')", "len(scraper.scrapying_queue))) with open('scraper.cache', 'wb') as temp: pickle.dump(scraper, temp) logger.info('successfully saved scrapying progress to", "remaining artwork records from database db.delete_artworks(artwork_ids) # convert artwork IDs to urls and", "db.delete_artworks(artwork_ids) # convert artwork IDs to urls and add to scrapying queue unscrapied_urls", "scrapying progress...') logger.info('current scraper with %u urls scrapied, and %u scrapying urls.' 
%", "log_level = arguments.log_level[0].upper() logger = config_logger(log_level) # create images sub-directory if not exists", "= 1, help = 'begin sub-URL to replace default \"/\", \"/user/blackdragonf\" for example'", "logger.info('skipped integrity check.') # main body if scrapy_mode == 'default': while True: #", "is 'update' argparser.add_argument( '--expire-time', nargs = 1, type = int, default = [15],", "in database db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy expired artwork(with ID: %u)\\'s info .' %", "'wb') as temp: pickle.dump(scraper, temp) logger.info('successfully saved scrapying progress to scraper.cache.') exit(0) def", "signal.signal(signal.SIGINT, signal_handler) # initialize database and scraper db = database.Database('fa_scraper.db') if util.if_cache_exists(): #", "\"/user/blackdragonf\" for example' ) # skip-check - when specified, skip integrity check step", "'fa_scraper.log', 'level': 'DEBUG', 'formatter': 'standard' } }, 'loggers': { 'default': { 'handlers': ['console',", "cookies, begin_url) logger.info('initialization completed.') scrapy_mode = arguments.scrapy_mode[0] logger.info('scrapy mode set to %s' %", "'sets expire time(days) for scrapied images, default: 15' ) # scrapy-interval - int", "= arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization completed.') scrapy_mode = arguments.scrapy_mode[0] logger.info('scrapy", "argparser.add_argument( '--expire-time', nargs = 1, type = int, default = [15], help =", "'--cookies', nargs = 1, help = 'specify the user cookies(json format file) to", "logined argparser.add_argument( '-c', '--cookies', nargs = 1, help = 'specify the user cookies(json", "are artworks missing, remove them from database, and add there urls to scraper's", "is 'default', set scrapy mode argparser.add_argument( '-m', '--scrapy-mode', nargs = 1, default =", "scraper instance \"\"\" # get all artwork IDs from artwork, and 
initialize a", "perform integrity check if not arguments.skip_check: if scrapy_mode == 'default': check_and_fix_artworks(db, scraper) logger.info('integrity", "expire-time - int, set expire time # only works when scrapy-mode is 'update'", "'--scrapy-interval', nargs = 1, type = int, default = [60], help = 'sets", "database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired artwork IDs.') for artwork_id in expired_artwork_ids:", "reset scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0] else: cookies = {} if arguments.cookies: # load", "'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'standard' }, 'file': { 'class': 'logging.FileHandler', 'filename': 'fa_scraper.log', 'level':", "program. Args: console_log_level - console log level, while log file level is fixed", "error # fix Scraper lazy load *manually* because pickle will NOT save class", "integrity check step argparser.add_argument( '--skip-check', action='store_true', help = 'skip integrity check(ONLY works in", "signal received, use pickle to dump scraper logger.info('exit signal received, saving scrapying progress...')", "in artwork_ids: artwork_ids.remove(artwork_id) # remove remaining artwork records from database db.delete_artworks(artwork_ids) # convert", "'specify the user cookies(json format file) to be used, needed if you want", ") # scrapy-interval - int ,set scraper's sleep interval between two requests argparser.add_argument(", "'--scrapy-mode', nargs = 1, default = ['default'], choices = ['default', 'update'], help =", "if __name__ == '__main__': # parse arguments from command line arguments = parse_arguments()", "# extend added time artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('scrapied artwork information:", ") # skip-check - when specified, skip integrity check step argparser.add_argument( '--skip-check', action='store_true',", "import os import signal import pickle 
import json import logging import logging.config def", "'format': '%(asctime)s - [%(levelname)s] %(message)s' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler',", "fixed to debug \"\"\" config = { 'version': 1, 'formatters': { 'standard': {", "not perform integrity check in update mode.') else: logger.info('skipped integrity check.') # main", "1, type = int, default = [15], help = 'sets expire time(days) for", "scrapy_mode == 'default': while True: # scrapy loop # try to get artwork", "scrapy_mode = arguments.scrapy_mode[0] logger.info('scrapy mode set to %s' % scrapy_mode) # try to", "'logging.FileHandler', 'filename': 'fa_scraper.log', 'level': 'DEBUG', 'formatter': 'standard' } }, 'loggers': { 'default': {", "artworks: if os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) # if exists image named 'artwork ID',", "os import signal import pickle import json import logging import logging.config def signal_handler(signum,", "artwork = scraper.scrapy_pending_url() if artwork: # extend added time artwork['Added'] = util.get_current_time() information", "for artwork_id in expired_artwork_ids: # try to artwork attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if", "int ,set scraper's sleep interval between two requests argparser.add_argument( '-i', '--scrapy-interval', nargs =", "# traverse through 'images' sub-directory os.chdir('images') logger.debug('changed working directory to images.') artworks =", "1, help = 'begin sub-URL to replace default \"/\", \"/user/blackdragonf\" for example' )", "logger def check_and_fix_artworks(db, scraper): \"\"\" Integrity check step. 
Traverse through database and see", "for scrapied images, default: 15' ) # scrapy-interval - int ,set scraper's sleep", ") # base-url - sub-url scraper to replace with default '/', must be", "console_log_level) logger.debug('logger configured.') return logger def check_and_fix_artworks(db, scraper): \"\"\" Integrity check step. Traverse", "while True: # scrapy loop # try to get artwork from scraper artwork", "expire time(days) for scrapied images, default: 15' ) # scrapy-interval - int ,set", "# insert into database db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy artwork with ID: %u.' %", "expired_artwork_ids: # try to artwork attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: # update", "saving scrapying progress...') logger.info('current scraper with %u urls scrapied, and %u scrapying urls.'", "if not util.create_images_directory(): exit(-1) # set signal handler signal.signal(signal.SIGINT, signal_handler) # initialize database", "artwork_ids.remove(artwork_id) # remove remaining artwork records from database db.delete_artworks(artwork_ids) # convert artwork IDs", "% sys.argv[0], description = 'A scraper of furaffinity.net written with python.' ) #", "artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) # if exists image named 'artwork ID', remove it from", "scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0] else: cookies = {} if arguments.cookies: # load provided", "to origin.') logger.info('%u wrong records removed from database.' % len(artwork_ids)) if __name__ ==", "'default', set scrapy mode argparser.add_argument( '-m', '--scrapy-mode', nargs = 1, default = ['default'],", "{ 'default': { 'handlers': ['console', 'file'], 'level': 'DEBUG', 'propagate': True } } }", "add there urls to scraper's scrapying queue. ONLY works in default mode. 
Args:", "None if arguments.begin_url: # alternative begin-url specified begin_url = arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0],", "body if scrapy_mode == 'default': while True: # scrapy loop # try to", "scrapied, and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache', 'wb') as temp:", "scraper db = database.Database('fa_scraper.db') if util.if_cache_exists(): # trying to load scraper from scraper.cache", "be choosen from 'default', 'update' # default is 'default', set scrapy mode argparser.add_argument(", "last scrapying progress, with %u scrapied urls and %u scrapying urls.' % (len(scraper.scrapied_set),", "%u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache', 'wb') as temp: pickle.dump(scraper, temp)", "artwork in current round.') elif scrapy_mode == 'update': # get expired artwork IDs", "= arguments.scrapy_mode[0] logger.info('scrapy mode set to %s' % scrapy_mode) # try to perform", "use cookies(json) provided to scrape as logined argparser.add_argument( '-c', '--cookies', nargs = 1,", "scrapying urls.' 
% (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache', 'wb') as temp: pickle.dump(scraper, temp) logger.info('successfully", "'DEBUG', 'propagate': True } } } config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config) logger = logging.getLogger('default')", "in update mode.') else: logger.info('skipped integrity check.') # main body if scrapy_mode ==", "'--begin-url', nargs = 1, help = 'begin sub-URL to replace default \"/\", \"/user/blackdragonf\"", "# try to artwork attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: # update added", "nargs = 1, type = int, default = [15], help = 'sets expire", "set scrapy mode argparser.add_argument( '-m', '--scrapy-mode', nargs = 1, default = ['default'], choices", "from command line \"\"\" argparser = argparse.ArgumentParser( usage = '%s [OPTIONS]' % sys.argv[0],", "try to perform integrity check if not arguments.skip_check: if scrapy_mode == 'default': check_and_fix_artworks(db,", "first of program. Args: console_log_level - console log level, while log file level", "# try to get artwork from scraper artwork = scraper.scrapy_pending_url() if artwork: #", "it from set if artwork_id in artwork_ids: artwork_ids.remove(artwork_id) # remove remaining artwork records", "= logging.getLogger('default') logger.info('set console log level to %s' % console_log_level) logger.debug('logger configured.') return", "get all artwork IDs from artwork, and initialize a set artwork_ids = set(db.get_artwork_ids())", "be called at the very first of program. 
Args: console_log_level - console log", "['default', 'update'], help = 'sets scrapying mode, default: default' ) # expire-time -", "artwork, and initialize a set artwork_ids = set(db.get_artwork_ids()) # traverse through 'images' sub-directory", "level for console log messages, default: info' ) arguments = argparser.parse_args() return arguments", "logging.config.dictConfig(config) logger = logging.getLogger('default') logger.info('set console log level to %s' % console_log_level) logger.debug('logger", "Args: db - database instance scraper - scraper instance \"\"\" # get all", "scraper - scraper instance \"\"\" # get all artwork IDs from artwork, and", "artwork from scraper artwork = scraper.scrapy_pending_url() if artwork: # extend added time artwork['Added']", "ID artwork['ID'] = artwork_id artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('updated artwork information:", "'debug', 'info', 'warning', 'error', 'fatal' # default is info, set the console log", "exists image named 'artwork ID', remove it from set if artwork_id in artwork_ids:", "default = [60], help = 'sets sleep interval(seconds) between two network requests, default:", "named 'artwork ID', remove it from set if artwork_id in artwork_ids: artwork_ids.remove(artwork_id) #", "= json.dumps(artwork) logger.info('scrapied artwork information: %s' % information) # insert into database db.insert_or_replace_artwork(artwork)", "# expire-time - int, set expire time # only works when scrapy-mode is", "* import argparse import sys import os import signal import pickle import json", "images sub-directory if not exists if not util.create_images_directory(): exit(-1) # set signal handler", "database and see if for each artwork, there exists a corresponding image in", "'%s [OPTIONS]' % sys.argv[0], description = 'A scraper of furaffinity.net written with python.'", "Scraper lazy load *manually* because pickle will NOT save class variable scrapy.Scraper.SCRAPIED_BASE =", 
"choosen from 'default', 'update' # default is 'default', set scrapy mode argparser.add_argument( '-m',", "re-scrapy expired artwork(with ID: %u)\\'s info .' % artwork.get('ID')) db.close_db(conn) logger.info('exiting scraper...') exit(0)", "scrapy.Scraper.SCRAPIED_BASE = True # reset scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0] else: cookies = {}", "__name__ == '__main__': # parse arguments from command line arguments = parse_arguments() #", "{ 'class': 'logging.FileHandler', 'filename': 'fa_scraper.log', 'level': 'DEBUG', 'formatter': 'standard' } }, 'loggers': {", "elif scrapy_mode == 'update': # get expired artwork IDs from database expired_artwork_ids =", "all artwork IDs from artwork, and initialize a set artwork_ids = set(db.get_artwork_ids()) #", "urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented for potiential error # fix Scraper", "check.') # main body if scrapy_mode == 'default': while True: # scrapy loop", "logger.info('continued with last scrapying progress, with %u scrapied urls and %u scrapying urls.'", "Parse arguments from commandline. Args: None Returns: arguments - arguments parsed from command", "argparse import sys import os import signal import pickle import json import logging", "{ 'handlers': ['console', 'file'], 'level': 'DEBUG', 'propagate': True } } } config['handlers']['console']['level'] =", "sub-directory. 
If there are artworks missing, remove them from database, and add there", "= json.dumps(artwork) logger.info('updated artwork information: %s' % information) # replace record in database", "'version': 1, 'formatters': { 'standard': { 'format': '%(asctime)s - [%(levelname)s] %(message)s' } },", "artwork_ids = set(db.get_artwork_ids()) # traverse through 'images' sub-directory os.chdir('images') logger.debug('changed working directory to", "# alternative begin-url specified begin_url = arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization", "removed from database.' % len(artwork_ids)) if __name__ == '__main__': # parse arguments from", "for artwork in artworks: if os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) # if exists image", "and initialize a set artwork_ids = set(db.get_artwork_ids()) # traverse through 'images' sub-directory os.chdir('images')", "= 'sets verbosity level for console log messages, default: info' ) arguments =", "# default is info, set the console log level argparser.add_argument( '--log-level', nargs =", "'level': 'DEBUG', 'formatter': 'standard' } }, 'loggers': { 'default': { 'handlers': ['console', 'file'],", "'standard' } }, 'loggers': { 'default': { 'handlers': ['console', 'file'], 'level': 'DEBUG', 'propagate':", "scraper.scrapy_pending_url() if artwork: # extend added time artwork['Added'] = util.get_current_time() information = json.dumps(artwork)", "cookies = {} if arguments.cookies: # load provided cookies from file cookies =", "= 'A scraper of furaffinity.net written with python.' 
) # scrapy-mode - can", "if arguments.begin_url: # alternative begin-url specified begin_url = arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies,", "# only works when scrapy-mode is 'update' argparser.add_argument( '--expire-time', nargs = 1, type", "'warning', 'error', 'fatal'], help = 'sets verbosity level for console log messages, default:", "arguments.skip_check: if scrapy_mode == 'default': check_and_fix_artworks(db, scraper) logger.info('integrity check completed.') else: logger.info('will not", "}, 'loggers': { 'default': { 'handlers': ['console', 'file'], 'level': 'DEBUG', 'propagate': True }", "progress...') logger.info('current scraper with %u urls scrapied, and %u scrapying urls.' % (len(scraper.scrapied_set),", "nargs = 1, type = int, default = [60], help = 'sets sleep", "[60], help = 'sets sleep interval(seconds) between two network requests, default: 60' )", "{ 'console': { 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'standard' }, 'file': { 'class':", "from set if artwork_id in artwork_ids: artwork_ids.remove(artwork_id) # remove remaining artwork records from", "arguments.cookies: # load provided cookies from file cookies = util.get_cookies(arguments.cookies[0]) begin_url = None", "usage = '%s [OPTIONS]' % sys.argv[0], description = 'A scraper of furaffinity.net written", "= parse_arguments() # configure logger log_level = arguments.log_level[0].upper() logger = config_logger(log_level) # create", "'default': check_and_fix_artworks(db, scraper) logger.info('integrity check completed.') else: logger.info('will not perform integrity check in", "cookies = util.get_cookies(arguments.cookies[0]) begin_url = None if arguments.begin_url: # alternative begin-url specified begin_url", "with open('scraper.cache', 'wb') as temp: pickle.dump(scraper, temp) logger.info('successfully saved scrapying progress to scraper.cache.')", "images' ) # log-level - cen be choosen from 'debug', 'info', 'warning', 
'error',", "info, set the console log level argparser.add_argument( '--log-level', nargs = 1, default =", "set to %s' % scrapy_mode) # try to perform integrity check if not", "'update': # get expired artwork IDs from database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all", "logger.info('will not perform integrity check in update mode.') else: logger.info('skipped integrity check.') #", "with open('scraper.cache', 'rb') as temp: scraper = pickle.load(temp) logger.info('continued with last scrapying progress,", "pickle will NOT save class variable scrapy.Scraper.SCRAPIED_BASE = True # reset scrapy_interval scraper.scrapy_interval", "interval between two requests argparser.add_argument( '-i', '--scrapy-interval', nargs = 1, type = int,", "exit signal received, use pickle to dump scraper logger.info('exit signal received, saving scrapying", "'-m', '--scrapy-mode', nargs = 1, default = ['default'], choices = ['default', 'update'], help", "and see if for each artwork, there exists a corresponding image in images", "- int, set expire time # only works when scrapy-mode is 'update' argparser.add_argument(", "= list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed working directory to origin.') logger.info('%u wrong records", "parse_arguments() # configure logger log_level = arguments.log_level[0].upper() logger = config_logger(log_level) # create images", "specified begin_url = arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization completed.') scrapy_mode =", "when scrapy-mode is 'update' argparser.add_argument( '--expire-time', nargs = 1, type = int, default", "a valid sub-url defined in constant.py argparser.add_argument( '--begin-url', nargs = 1, help =", "- database instance scraper - scraper instance \"\"\" # get all artwork IDs", "sub-URL to replace default 
\"/\", \"/user/blackdragonf\" for example' ) # skip-check - when", "received, saving scrapying progress...') logger.info('current scraper with %u urls scrapied, and %u scrapying", "if not exists if not util.create_images_directory(): exit(-1) # set signal handler signal.signal(signal.SIGINT, signal_handler)", "scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: # update added time and set ID artwork['ID'] = artwork_id", "= 'sets sleep interval(seconds) between two network requests, default: 60' ) # cookies", "parsed from command line \"\"\" argparser = argparse.ArgumentParser( usage = '%s [OPTIONS]' %", "for example' ) # skip-check - when specified, skip integrity check step argparser.add_argument(", "info' ) arguments = argparser.parse_args() return arguments def config_logger(console_log_level): \"\"\" Configure logger, should", "list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed working directory to origin.') logger.info('%u wrong records removed from", "util.get_current_time() information = json.dumps(artwork) logger.info('scrapied artwork information: %s' % information) # insert into", "nargs = 1, help = 'specify the user cookies(json format file) to be", "= int, default = [15], help = 'sets expire time(days) for scrapied images,", "= util.get_cookies(arguments.cookies[0]) begin_url = None if arguments.begin_url: # alternative begin-url specified begin_url =", "%u scrapied urls and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented", "scrapy mode argparser.add_argument( '-m', '--scrapy-mode', nargs = 1, default = ['default'], choices =", "insert into database db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy artwork with ID: %u.' 
% artwork.get('ID'))", "- scraper instance \"\"\" # get all artwork IDs from artwork, and initialize", "artwork attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: # update added time and set", "artwork IDs.') for artwork_id in expired_artwork_ids: # try to artwork attributes artwork =", "logger.info('completed to re-scrapy expired artwork(with ID: %u)\\'s info .' % artwork.get('ID')) db.close_db(conn) logger.info('exiting", "parse arguments from command line arguments = parse_arguments() # configure logger log_level =", "== '__main__': # parse arguments from command line arguments = parse_arguments() # configure", "= arguments.scrapy_interval[0] else: cookies = {} if arguments.cookies: # load provided cookies from", "nargs = 1, default = ['info'], choices = ['debug', 'info', 'warning', 'error', 'fatal'],", "information: %s' % information) # replace record in database db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy", "database db.delete_artworks(artwork_ids) # convert artwork IDs to urls and add to scrapying queue", "# default is 'default', set scrapy mode argparser.add_argument( '-m', '--scrapy-mode', nargs = 1,", "cookies(json format file) to be used, needed if you want to scrape as", "to scrape as login status' ) # base-url - sub-url scraper to replace", "# scrapy-mode - can be choosen from 'default', 'update' # default is 'default',", "# parse arguments from command line arguments = parse_arguments() # configure logger log_level", "artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('updated artwork information: %s' % information) #", "arguments = argparser.parse_args() return arguments def config_logger(console_log_level): \"\"\" Configure logger, should be called", "temp: pickle.dump(scraper, temp) logger.info('successfully saved scrapying progress to scraper.cache.') exit(0) def parse_arguments(): \"\"\"", "artwork['ID'] = artwork_id artwork['Added'] 
= util.get_current_time() information = json.dumps(artwork) logger.info('updated artwork information: %s'", "initialize a set artwork_ids = set(db.get_artwork_ids()) # traverse through 'images' sub-directory os.chdir('images') logger.debug('changed", ") # scrapy-mode - can be choosen from 'default', 'update' # default is", "if artwork: # update added time and set ID artwork['ID'] = artwork_id artwork['Added']", "logger, should be called at the very first of program. Args: console_log_level -", "because pickle will NOT save class variable scrapy.Scraper.SCRAPIED_BASE = True # reset scrapy_interval", "= ['default'], choices = ['default', 'update'], help = 'sets scrapying mode, default: default'", "as logined argparser.add_argument( '-c', '--cookies', nargs = 1, help = 'specify the user", "logger log_level = arguments.log_level[0].upper() logger = config_logger(log_level) # create images sub-directory if not", "default '/', must be a valid sub-url defined in constant.py argparser.add_argument( '--begin-url', nargs", "exists a corresponding image in images sub-directory. If there are artworks missing, remove", "and add to scrapying queue unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed working", "# configure logger log_level = arguments.log_level[0].upper() logger = config_logger(log_level) # create images sub-directory", "if scrapy_mode == 'default': while True: # scrapy loop # try to get", "pickle import json import logging import logging.config def signal_handler(signum, frame): # exit signal", "from scraper.cache with open('scraper.cache', 'rb') as temp: scraper = pickle.load(temp) logger.info('continued with last", "python.' 
) # scrapy-mode - can be choosen from 'default', 'update' # default", "# trying to load scraper from scraper.cache with open('scraper.cache', 'rb') as temp: scraper", "'warning', 'error', 'fatal' # default is info, set the console log level argparser.add_argument(", "= console_log_level logging.config.dictConfig(config) logger = logging.getLogger('default') logger.info('set console log level to %s' %", "from file cookies = util.get_cookies(arguments.cookies[0]) begin_url = None if arguments.begin_url: # alternative begin-url", "# convert artwork IDs to urls and add to scrapying queue unscrapied_urls =", "unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed working directory to origin.') logger.info('%u wrong", "default is 'default', set scrapy mode argparser.add_argument( '-m', '--scrapy-mode', nargs = 1, default", "else: logger.info('will not perform integrity check in update mode.') else: logger.info('skipped integrity check.')", "with ID: %u.' % artwork.get('ID')) else: logger.info('didn\\'t scrapy artwork in current round.') elif", "replace default \"/\", \"/user/blackdragonf\" for example' ) # skip-check - when specified, skip", "should be called at the very first of program. Args: console_log_level - console", "default: 15' ) # scrapy-interval - int ,set scraper's sleep interval between two", "to scraper.cache.') exit(0) def parse_arguments(): \"\"\" Parse arguments from commandline. Args: None Returns:", "logger.info('set console log level to %s' % console_log_level) logger.debug('logger configured.') return logger def", "<reponame>BlackDragonF/FurAffinityScraper from fa_scraper import * import argparse import sys import os import signal", "if artwork: # extend added time artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('scrapied", "database.' 
% len(artwork_ids)) if __name__ == '__main__': # parse arguments from command line", "to %s' % console_log_level) logger.debug('logger configured.') return logger def check_and_fix_artworks(db, scraper): \"\"\" Integrity", "len(artwork_ids)) if __name__ == '__main__': # parse arguments from command line arguments =", "% information) # insert into database db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy artwork with ID:", "default = ['info'], choices = ['debug', 'info', 'warning', 'error', 'fatal'], help = 'sets", "completed.') else: logger.info('will not perform integrity check in update mode.') else: logger.info('skipped integrity", "json import logging import logging.config def signal_handler(signum, frame): # exit signal received, use", "log messages, default: info' ) arguments = argparser.parse_args() return arguments def config_logger(console_log_level): \"\"\"", "artwork IDs to urls and add to scrapying queue unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids)))", "handler signal.signal(signal.SIGINT, signal_handler) # initialize database and scraper db = database.Database('fa_scraper.db') if util.if_cache_exists():", "specified, skip integrity check step argparser.add_argument( '--skip-check', action='store_true', help = 'skip integrity check(ONLY", "% (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache', 'wb') as temp: pickle.dump(scraper, temp) logger.info('successfully saved scrapying", "arguments.scrapy_interval[0] else: cookies = {} if arguments.cookies: # load provided cookies from file", "} config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config) logger = logging.getLogger('default') logger.info('set console log level to", "artwork.get('ID')) else: logger.info('didn\\'t scrapy artwork in current round.') elif scrapy_mode == 'update': #", "'update' # default is 'default', set scrapy mode argparser.add_argument( '-m', '--scrapy-mode', nargs =", 
"image named 'artwork ID', remove it from set if artwork_id in artwork_ids: artwork_ids.remove(artwork_id)", "as temp: pickle.dump(scraper, temp) logger.info('successfully saved scrapying progress to scraper.cache.') exit(0) def parse_arguments():", "mode set to %s' % scrapy_mode) # try to perform integrity check if", "{} if arguments.cookies: # load provided cookies from file cookies = util.get_cookies(arguments.cookies[0]) begin_url", "%s' % scrapy_mode) # try to perform integrity check if not arguments.skip_check: if", "to get artwork from scraper artwork = scraper.scrapy_pending_url() if artwork: # extend added", "scrapying mode, default: default' ) # expire-time - int, set expire time #", "import argparse import sys import os import signal import pickle import json import", "'filename': 'fa_scraper.log', 'level': 'DEBUG', 'formatter': 'standard' } }, 'loggers': { 'default': { 'handlers':", "set if artwork_id in artwork_ids: artwork_ids.remove(artwork_id) # remove remaining artwork records from database", "% len(artwork_ids)) if __name__ == '__main__': # parse arguments from command line arguments", "create images sub-directory if not exists if not util.create_images_directory(): exit(-1) # set signal", "log-level - cen be choosen from 'debug', 'info', 'warning', 'error', 'fatal' # default", "IDs from database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired artwork IDs.') for artwork_id", "'artwork ID', remove it from set if artwork_id in artwork_ids: artwork_ids.remove(artwork_id) # remove", "'update' argparser.add_argument( '--expire-time', nargs = 1, type = int, default = [15], help", "log level argparser.add_argument( '--log-level', nargs = 1, default = ['info'], choices = ['debug',", "= set(db.get_artwork_ids()) # traverse through 'images' sub-directory os.chdir('images') logger.debug('changed working directory to images.')", "- can be choosen from 'default', 'update' # default is 'default', set 
scrapy", "time # only works when scrapy-mode is 'update' argparser.add_argument( '--expire-time', nargs = 1,", "load scraper from scraper.cache with open('scraper.cache', 'rb') as temp: scraper = pickle.load(temp) logger.info('continued", "import json import logging import logging.config def signal_handler(signum, frame): # exit signal received,", "in images sub-directory. If there are artworks missing, remove them from database, and", "set artwork_ids = set(db.get_artwork_ids()) # traverse through 'images' sub-directory os.chdir('images') logger.debug('changed working directory", "mode.') else: logger.info('skipped integrity check.') # main body if scrapy_mode == 'default': while", "queue unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed working directory to origin.') logger.info('%u", "integrity check(ONLY works in default mode) between database and images' ) # log-level", "log level to %s' % console_log_level) logger.debug('logger configured.') return logger def check_and_fix_artworks(db, scraper):", "to scrape as logined argparser.add_argument( '-c', '--cookies', nargs = 1, help = 'specify", "# fix Scraper lazy load *manually* because pickle will NOT save class variable", "scrapying queue. ONLY works in default mode. Args: db - database instance scraper", "a corresponding image in images sub-directory. If there are artworks missing, remove them", "if util.if_cache_exists(): # trying to load scraper from scraper.cache with open('scraper.cache', 'rb') as", "and set ID artwork['ID'] = artwork_id artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('updated", "with %u urls scrapied, and %u scrapying urls.' 
% (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache',", "logger.info('exit signal received, saving scrapying progress...') logger.info('current scraper with %u urls scrapied, and", "'--expire-time', nargs = 1, type = int, default = [15], help = 'sets", "# remove remaining artwork records from database db.delete_artworks(artwork_ids) # convert artwork IDs to", "Traverse through database and see if for each artwork, there exists a corresponding", "extend added time artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('scrapied artwork information: %s'", "artwork: # update added time and set ID artwork['ID'] = artwork_id artwork['Added'] =", "to scrapying queue unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed working directory to", "artwork IDs from database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired artwork IDs.') for", "- [%(levelname)s] %(message)s' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'level': 'DEBUG',", "'level': 'DEBUG', 'propagate': True } } } config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config) logger =", "% information) # replace record in database db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy expired artwork(with", "works in default mode) between database and images' ) # log-level - cen", "to load scraper from scraper.cache with open('scraper.cache', 'rb') as temp: scraper = pickle.load(temp)", ") # expire-time - int, set expire time # only works when scrapy-mode", "directory to images.') artworks = os.listdir('.') for artwork in artworks: if os.path.isfile(artwork): artwork_id", "scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization completed.') scrapy_mode = arguments.scrapy_mode[0] 
logger.info('scrapy mode set to %s'", "IDs.') for artwork_id in expired_artwork_ids: # try to artwork attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id))", "'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'standard' }, 'file': { 'class': 'logging.FileHandler', 'filename': 'fa_scraper.log',", "'standard' }, 'file': { 'class': 'logging.FileHandler', 'filename': 'fa_scraper.log', 'level': 'DEBUG', 'formatter': 'standard' }", "pickle.load(temp) logger.info('continued with last scrapying progress, with %u scrapied urls and %u scrapying", "to replace default \"/\", \"/user/blackdragonf\" for example' ) # skip-check - when specified,", "util.get_current_time() information = json.dumps(artwork) logger.info('updated artwork information: %s' % information) # replace record", "# log-level - cen be choosen from 'debug', 'info', 'warning', 'error', 'fatal' #", "into database db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy artwork with ID: %u.' % artwork.get('ID')) else:", "['info'], choices = ['debug', 'info', 'warning', 'error', 'fatal'], help = 'sets verbosity level", "= 'sets scrapying mode, default: default' ) # expire-time - int, set expire", "# load provided cookies from file cookies = util.get_cookies(arguments.cookies[0]) begin_url = None if", "%u urls scrapied, and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache', 'wb')", "logger.info('%u wrong records removed from database.' 
% len(artwork_ids)) if __name__ == '__main__': #", "to artwork attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: # update added time and", "pickle.dump(scraper, temp) logger.info('successfully saved scrapying progress to scraper.cache.') exit(0) def parse_arguments(): \"\"\" Parse", "command line \"\"\" argparser = argparse.ArgumentParser( usage = '%s [OPTIONS]' % sys.argv[0], description", "check(ONLY works in default mode) between database and images' ) # log-level -", "convert artwork IDs to urls and add to scrapying queue unscrapied_urls = list(map(util.generate_url_from_id,", "see if for each artwork, there exists a corresponding image in images sub-directory.", "get artwork from scraper artwork = scraper.scrapy_pending_url() if artwork: # extend added time", "to debug \"\"\" config = { 'version': 1, 'formatters': { 'standard': { 'format':", "db.insert_or_replace_artwork(artwork) logger.info('completed to scrapy artwork with ID: %u.' % artwork.get('ID')) else: logger.info('didn\\'t scrapy", "to urls and add to scrapying queue unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..')", "expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired artwork IDs.') for artwork_id in expired_artwork_ids: #", "1, default = ['info'], choices = ['debug', 'info', 'warning', 'error', 'fatal'], help =", "IDs to urls and add to scrapying queue unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls)", "expire time # only works when scrapy-mode is 'update' argparser.add_argument( '--expire-time', nargs =", "load provided cookies from file cookies = util.get_cookies(arguments.cookies[0]) begin_url = None if arguments.begin_url:", "information = json.dumps(artwork) logger.info('scrapied artwork information: %s' % information) # insert into 
database", "logger.info('current scraper with %u urls scrapied, and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue)))", "Configure logger, should be called at the very first of program. Args: console_log_level", "# create images sub-directory if not exists if not util.create_images_directory(): exit(-1) # set", "{ 'standard': { 'format': '%(asctime)s - [%(levelname)s] %(message)s' } }, 'handlers': { 'console':", "scraper to replace with default '/', must be a valid sub-url defined in", "log file level is fixed to debug \"\"\" config = { 'version': 1,", "= ['default', 'update'], help = 'sets scrapying mode, default: default' ) # expire-time", "{ 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'standard' }, 'file': { 'class': 'logging.FileHandler', 'filename':", "not util.create_images_directory(): exit(-1) # set signal handler signal.signal(signal.SIGINT, signal_handler) # initialize database and", "help = 'specify the user cookies(json format file) to be used, needed if", "works in default mode. Args: db - database instance scraper - scraper instance", "scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization completed.') scrapy_mode = arguments.scrapy_mode[0] logger.info('scrapy mode set", "from commandline. 
Args: None Returns: arguments - arguments parsed from command line \"\"\"", "exit(-1) # set signal handler signal.signal(signal.SIGINT, signal_handler) # initialize database and scraper db", "main body if scrapy_mode == 'default': while True: # scrapy loop # try", "lazy load *manually* because pickle will NOT save class variable scrapy.Scraper.SCRAPIED_BASE = True", "= 'begin sub-URL to replace default \"/\", \"/user/blackdragonf\" for example' ) # skip-check", "# skip-check - when specified, skip integrity check step argparser.add_argument( '--skip-check', action='store_true', help", "logger.info('scrapy mode set to %s' % scrapy_mode) # try to perform integrity check", "from scraper artwork = scraper.scrapy_pending_url() if artwork: # extend added time artwork['Added'] =", "[OPTIONS]' % sys.argv[0], description = 'A scraper of furaffinity.net written with python.' )", "replace record in database db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy expired artwork(with ID: %u)\\'s info", "if scrapy_mode == 'default': check_and_fix_artworks(db, scraper) logger.info('integrity check completed.') else: logger.info('will not perform", "completed.') scrapy_mode = arguments.scrapy_mode[0] logger.info('scrapy mode set to %s' % scrapy_mode) # try", "console log level to %s' % console_log_level) logger.debug('logger configured.') return logger def check_and_fix_artworks(db,", "in default mode) between database and images' ) # log-level - cen be", "requests argparser.add_argument( '-i', '--scrapy-interval', nargs = 1, type = int, default = [60],", "console log messages, default: info' ) arguments = argparser.parse_args() return arguments def config_logger(console_log_level):", "interval(seconds) between two network requests, default: 60' ) # cookies - filename, use", "scrapy_mode == 'update': # get expired artwork IDs from database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0])", "level argparser.add_argument( '--log-level', 
nargs = 1, default = ['info'], choices = ['debug', 'info',", "True } } } config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config) logger = logging.getLogger('default') logger.info('set console", "arguments.scrapy_mode[0] logger.info('scrapy mode set to %s' % scrapy_mode) # try to perform integrity", "== 'default': while True: # scrapy loop # try to get artwork from", "# scrapy loop # try to get artwork from scraper artwork = scraper.scrapy_pending_url()", "len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented for potiential error # fix Scraper lazy load *manually*", "parse_arguments(): \"\"\" Parse arguments from commandline. Args: None Returns: arguments - arguments parsed", "description = 'A scraper of furaffinity.net written with python.' ) # scrapy-mode -", "logger.info('didn\\'t scrapy artwork in current round.') elif scrapy_mode == 'update': # get expired", "signal received, saving scrapying progress...') logger.info('current scraper with %u urls scrapied, and %u", "information) # replace record in database db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy expired artwork(with ID:", "artwork, there exists a corresponding image in images sub-directory. If there are artworks", "scraper): \"\"\" Integrity check step. 
Traverse through database and see if for each", "be used, needed if you want to scrape as login status' ) #", "artworks = os.listdir('.') for artwork in artworks: if os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) #", "} } } config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config) logger = logging.getLogger('default') logger.info('set console log", "default \"/\", \"/user/blackdragonf\" for example' ) # skip-check - when specified, skip integrity", "instance \"\"\" # get all artwork IDs from artwork, and initialize a set", "util.if_cache_exists(): # trying to load scraper from scraper.cache with open('scraper.cache', 'rb') as temp:", "in expired_artwork_ids: # try to artwork attributes artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id)) if artwork: #", "two network requests, default: 60' ) # cookies - filename, use cookies(json) provided", "record in database db.insert_or_replace_artwork(artwork) logger.info('completed to re-scrapy expired artwork(with ID: %u)\\'s info .'", "== 'default': check_and_fix_artworks(db, scraper) logger.info('integrity check completed.') else: logger.info('will not perform integrity check", "'loggers': { 'default': { 'handlers': ['console', 'file'], 'level': 'DEBUG', 'propagate': True } }", "'file'], 'level': 'DEBUG', 'propagate': True } } } config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config) logger", "= util.get_current_time() information = json.dumps(artwork) logger.info('scrapied artwork information: %s' % information) # insert", "%u scrapying urls.' 
% (len(scraper.scrapied_set), len(scraper.scrapying_queue))) # os.remove('scraper.cache') commented for potiential error #", "between two requests argparser.add_argument( '-i', '--scrapy-interval', nargs = 1, type = int, default", "= True # reset scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0] else: cookies = {} if", "scrapy-interval - int ,set scraper's sleep interval between two requests argparser.add_argument( '-i', '--scrapy-interval',", "= [60], help = 'sets sleep interval(seconds) between two network requests, default: 60'", "log level, while log file level is fixed to debug \"\"\" config =", ",set scraper's sleep interval between two requests argparser.add_argument( '-i', '--scrapy-interval', nargs = 1,", "the very first of program. Args: console_log_level - console log level, while log", "debug \"\"\" config = { 'version': 1, 'formatters': { 'standard': { 'format': '%(asctime)s", "artwork records from database db.delete_artworks(artwork_ids) # convert artwork IDs to urls and add", "scraper.scrapy_interval = arguments.scrapy_interval[0] else: cookies = {} if arguments.cookies: # load provided cookies", "get expired artwork IDs from database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired artwork", "Args: None Returns: arguments - arguments parsed from command line \"\"\" argparser =", "commandline. Args: None Returns: arguments - arguments parsed from command line \"\"\" argparser", "messages, default: info' ) arguments = argparser.parse_args() return arguments def config_logger(console_log_level): \"\"\" Configure", "step. 
Traverse through database and see if for each artwork, there exists a", "argparser = argparse.ArgumentParser( usage = '%s [OPTIONS]' % sys.argv[0], description = 'A scraper", "'level': 'DEBUG', 'formatter': 'standard' }, 'file': { 'class': 'logging.FileHandler', 'filename': 'fa_scraper.log', 'level': 'DEBUG',", "# os.remove('scraper.cache') commented for potiential error # fix Scraper lazy load *manually* because", "information = json.dumps(artwork) logger.info('updated artwork information: %s' % information) # replace record in", "1, default = ['default'], choices = ['default', 'update'], help = 'sets scrapying mode,", "from fa_scraper import * import argparse import sys import os import signal import", "and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache', 'wb') as temp: pickle.dump(scraper,", "the console log level argparser.add_argument( '--log-level', nargs = 1, default = ['info'], choices", "= 'sets expire time(days) for scrapied images, default: 15' ) # scrapy-interval -", "if os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) # if exists image named 'artwork ID', remove", "1, help = 'specify the user cookies(json format file) to be used, needed", "*manually* because pickle will NOT save class variable scrapy.Scraper.SCRAPIED_BASE = True # reset", "artwork: # extend added time artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('scrapied artwork", "'DEBUG', 'formatter': 'standard' } }, 'loggers': { 'default': { 'handlers': ['console', 'file'], 'level':", "time artwork['Added'] = util.get_current_time() information = json.dumps(artwork) logger.info('scrapied artwork information: %s' % information)", "level, while log file level is fixed to debug \"\"\" config = {", "} } config['handlers']['console']['level'] = console_log_level logging.config.dictConfig(config) logger = logging.getLogger('default') logger.info('set console log 
level", "each artwork, there exists a corresponding image in images sub-directory. If there are", "missing, remove them from database, and add there urls to scraper's scrapying queue.", "int, default = [60], help = 'sets sleep interval(seconds) between two network requests,", "default is info, set the console log level argparser.add_argument( '--log-level', nargs = 1,", "NOT save class variable scrapy.Scraper.SCRAPIED_BASE = True # reset scrapy_interval scraper.scrapy_interval = arguments.scrapy_interval[0]", "begin-url specified begin_url = arguments.begin_url[0] scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url) logger.info('initialization completed.') scrapy_mode", "not arguments.skip_check: if scrapy_mode == 'default': check_and_fix_artworks(db, scraper) logger.info('integrity check completed.') else: logger.info('will", "instance scraper - scraper instance \"\"\" # get all artwork IDs from artwork,", "very first of program. Args: console_log_level - console log level, while log file", "help = 'sets sleep interval(seconds) between two network requests, default: 60' ) #", "loop # try to get artwork from scraper artwork = scraper.scrapy_pending_url() if artwork:", "from 'debug', 'info', 'warning', 'error', 'fatal' # default is info, set the console", "} }, 'loggers': { 'default': { 'handlers': ['console', 'file'], 'level': 'DEBUG', 'propagate': True", "Returns: arguments - arguments parsed from command line \"\"\" argparser = argparse.ArgumentParser( usage", "add to scrapying queue unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids))) scraper.add_unscrapied_urls(unscrapied_urls) os.chdir('..') logger.debug('changed working directory", "format file) to be used, needed if you want to scrape as login", "default: 60' ) # cookies - filename, use cookies(json) provided to scrape as", "scraper's scrapying queue. ONLY works in default mode. 
Args: db - database instance", "'rb') as temp: scraper = pickle.load(temp) logger.info('continued with last scrapying progress, with %u", "json.dumps(artwork) logger.info('scrapied artwork information: %s' % information) # insert into database db.insert_or_replace_artwork(artwork) logger.info('completed", "with default '/', must be a valid sub-url defined in constant.py argparser.add_argument( '--begin-url',", "import * import argparse import sys import os import signal import pickle import", "'update'], help = 'sets scrapying mode, default: default' ) # expire-time - int,", "choices = ['debug', 'info', 'warning', 'error', 'fatal'], help = 'sets verbosity level for", "exit(0) def parse_arguments(): \"\"\" Parse arguments from commandline. Args: None Returns: arguments -", "in default mode. Args: db - database instance scraper - scraper instance \"\"\"", "# update added time and set ID artwork['ID'] = artwork_id artwork['Added'] = util.get_current_time()", "db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired artwork IDs.') for artwork_id in expired_artwork_ids: # try to", "import signal import pickle import json import logging import logging.config def signal_handler(signum, frame):", "between database and images' ) # log-level - cen be choosen from 'debug',", "integrity check if not arguments.skip_check: if scrapy_mode == 'default': check_and_fix_artworks(db, scraper) logger.info('integrity check", "return logger def check_and_fix_artworks(db, scraper): \"\"\" Integrity check step. Traverse through database and", "config_logger(log_level) # create images sub-directory if not exists if not util.create_images_directory(): exit(-1) #", "'default': { 'handlers': ['console', 'file'], 'level': 'DEBUG', 'propagate': True } } } config['handlers']['console']['level']", "remove them from database, and add there urls to scraper's scrapying queue. 
ONLY", "artworks missing, remove them from database, and add there urls to scraper's scrapying", "scrapied images, default: 15' ) # scrapy-interval - int ,set scraper's sleep interval", "= argparse.ArgumentParser( usage = '%s [OPTIONS]' % sys.argv[0], description = 'A scraper of", "sleep interval(seconds) between two network requests, default: 60' ) # cookies - filename,", "['default'], choices = ['default', 'update'], help = 'sets scrapying mode, default: default' )", "set(db.get_artwork_ids()) # traverse through 'images' sub-directory os.chdir('images') logger.debug('changed working directory to images.') artworks", "int(os.path.splitext(os.path.basename(artwork))[0]) # if exists image named 'artwork ID', remove it from set if", "expired artwork IDs from database expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0]) logger.info('retrieved all expired artwork IDs.')", "sleep interval between two requests argparser.add_argument( '-i', '--scrapy-interval', nargs = 1, type =", "argparser.add_argument( '-m', '--scrapy-mode', nargs = 1, default = ['default'], choices = ['default', 'update'],", "artwork in artworks: if os.path.isfile(artwork): artwork_id = int(os.path.splitext(os.path.basename(artwork))[0]) # if exists image named", "\"/\", \"/user/blackdragonf\" for example' ) # skip-check - when specified, skip integrity check", "scrapy_mode == 'default': check_and_fix_artworks(db, scraper) logger.info('integrity check completed.') else: logger.info('will not perform integrity", "(len(scraper.scrapied_set), len(scraper.scrapying_queue))) with open('scraper.cache', 'wb') as temp: pickle.dump(scraper, temp) logger.info('successfully saved scrapying progress", "cookies - filename, use cookies(json) provided to scrape as logined argparser.add_argument( '-c', '--cookies',", "scraper.cache with open('scraper.cache', 'rb') as temp: scraper = pickle.load(temp) logger.info('continued with last scrapying", "step argparser.add_argument( '--skip-check', 
action='store_true', help = 'skip integrity check(ONLY works in default mode)", "console log level, while log file level is fixed to debug \"\"\" config", "int, set expire time # only works when scrapy-mode is 'update' argparser.add_argument( '--expire-time'," ]
[ "(len(tau_colors) is not len(tau)): tau_colors = [(.5, .5, .5) for t in tau]", "to the renderer, should not be used for animations :param save: save view", "actor for wire frame box of the simulation domain \"\"\" (w, h, d)", "of colors does not match number of cell types - default to grey\"", "0 self.update = update_func self.tmax = tmax self.update_actors = None self.save = save", ".5) for t in tau] if (tau_alpha is None) or (len(tau_alpha) is not", "to visualize :param tau_list: list of cell types :param show: initialize and start", "args.color_xmin is not None: bnd['-x'] = args.color_xmin if args.color_ymin is not None: bnd['-y']", "None return reader.GetOutput() def visualize(self, step, tau_list, show=False, save=False, impath=None, imprefix=None, bbox=True, tau_alpha=None,", "match number of cell types - default to opaque objects\" args.alpha = [1", "reader.ReadAllScalarsOn() reader.Update() data = reader.GetOutput() if data.GetPointData().HasArray('cell.id') != 1: print \"'cell.id' array missing", "enumerate(tau) if t in update_tau] update_alpha = [tau_alpha[i] for i, t in enumerate(tau)", "actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2]) return actor def _get_box_actor(self): \"\"\" Create and return actor", "= '.' 
writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print 'save image {}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write() return", "tau_colors=None, tau_alpha=None, steps=None, save=False, impath=None, imprefix=None, fps=5, static_tau=None): \"\"\" Animate simulation results :param", "h, d) = self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData() imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w, h, d)", "parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static cell types (will NOT be updated during animation)\") parser.add_argument(\"--bboxcolor\",", "be updated during the animation \"\"\" if (tau_colors is None) or (len(tau_colors) is", "data.GetPointData().HasArray('cell.id') != 1: print \"'cell.id' array missing from {} -> skip file\".format(fn) return", "'focal point': args.camfocus} # create visualizer v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props,", "= save def execute(self, obj, event): iren = obj win = iren.GetRenderWindow() ren", "camera settings for the renderer. 
Available options: - position - focal point -", "len(args.colors) == 1: args.colors = [get_color(args.colors[0]) for t in args.celltypes] elif len(args.colors) <", "3) # Add the polygon to a list of polygons polygons = vtk.vtkCellArray()", "args.camfocus} # create visualizer v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall),", "self.renderWindowInteractor.Start() if save: w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if", "for p in pix: points.InsertNextPoint(p[0] - .5, p[1] - .5, p[2] - .5)", "array missing from {} -> skip file\".format(fn) return None if data.GetPointData().HasArray('cell.type') != 1:", "_set_renderer(self, winsize, bg): \"\"\" Set up vtk renderer \"\"\" self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0],", "create and store movie if args.movie and found_im2movie: if args.moviedir is None: args.moviedir", "[1 for tau in tau_list] # get actors stepdata = self._get_step(step) if stepdata", "= False # get new actors actors = self.update(t, self.save) self.update_actors = actors", "polygon to a list of polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a", "tau_alpha: list with opacity per cell type :param bbox: show bounding box :returns:", "help=\"movie directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all data at once", "for i, tau in enumerate(tau_list)] # get bounding box wire frame if bbox:", "actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor def _get_actor_for_tau(self, stepdata,", "= vtk.vtkPoints() f = 0 if '-' in tp else 1 if 'x'", "else 1 if 'x' 
in tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y' in", "sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim, order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim,", "help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2, help=\"window size\", default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\", type=int,", "\"\"\"Visualizes data on a cubic lattice Built specifically to visualize the VTK files", "= self.update(t, self.save) self.update_actors = actors self.timer_count += 1 class Visualizer3D(): \"\"\" Create", "get new actors actors = self.update(t, self.save) self.update_actors = actors self.timer_count += 1", "correct value t = self.timer_count if self.timer_count >= self.tmax: t = self.timer_count %", "for actor in actors: self.renderer.AddActor(actor) return actors def _modify_cam(self): \"\"\" Modify the camera", "[get_color(\"grey\") for t in args.celltypes] elif len(args.colors) == 1: args.colors = [get_color(args.colors[0]) for", "writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print 'save image {}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write() return actors def", "parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2, help=\"window size\", default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\", help=\"cell", "color name \"\"\" cc = colors.ColorConverter() if name in colors.cnames: return cc.to_rgb(name) else:", "\"--colors\", type=str, nargs=\"*\", help=\"colors or the cell types\") parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\", help=\"opacity", "type :param tau_alpha: list with opacity per cell type :param steps: steps (all", "bnd_colors # read data get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk', '')) if steps is", 
"\"--outdir\", type=str, help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save", "None: bnd['z'] = args.color_zmax if args.color_xmin is not None: bnd['-x'] = args.color_xmin if", "tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static) # create and store movie if", "writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is not None and imprefix.endswith('_'): imprefix =", "tp else 1 if 'x' in tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y'", "not os.path.isdir(args.simdir): sys.exit(\"Could not find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No vtk files", "actors def _modify_cam(self): \"\"\" Modify the camera settings for the renderer. 
Available options:", "in steps} else: self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir))} if", "and start the render window after adding the actors to the renderer, should", "print \"'cell.id' array missing from {} -> skip file\".format(fn) return None if data.GetPointData().HasArray('cell.type')", "animate(self, tau, tau_colors=None, tau_alpha=None, steps=None, save=False, impath=None, imprefix=None, fps=5, static_tau=None): \"\"\" Animate simulation", "\"--alpha\", type=float, nargs=\"*\", help=\"opacity of the cell types\") parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static cell", "when not specified) :param save: save view to png :param impath: path to", "bnd['-y'] = args.color_ymin if args.color_zmin is not None: bnd['-z'] = args.color_zmin if len(bnd)", "lambda t, s: self.visualize(steps[t], update_tau, show=False, save=s, bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath) cb", "onthefly: read data on the fly instead of all at once \"\"\" def", "from vtk.util import numpy_support as VN from matplotlib import colors found_im2movie = True", "impath: path to store image :param imprefix: image prefix :param bbox: show bounding", "parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all data at once before the visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\",", "tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is None: static_tau = [] update_tau = [t for t", "memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float,", "if tau_colors is None: tau_colors = [(0.5, 0.5, 0.5) for tau in tau_list]", "args.movie and found_im2movie: if args.moviedir is None: args.moviedir = args.outdir if args.moviename is", "point']) else: 
cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print 'add", "show_tau]) points = vtk.vtkPoints() for s in show_idx: if s not in sigma:", "box :returns: list of actors with first the actors for tau_list followed by", "vtk renderer \"\"\" self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer);", "actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5,", "and imprefix.endswith('_'): imprefix = imprefix + '_' if imprefix is None: imprefix =", "bounding box wire frame if bbox: actors.append(self._get_box_actor()) if bnd is not None: for", "be updated for actor in self.update_actors: ren.RemoveActor(actor) # set t to correct value", "in tp else 1 if 'x' in tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif", "(tau_colors is None) or (len(tau_colors) is not len(tau)): tau_colors = [(.5, .5, .5)", "the actors for tau_list followed by the bounding box (if applicable) \"\"\" #", "__maintainer__ = \"<NAME>\" # the vtkTimerCallback takes care of updating the visualzation class", "5: mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2]) return actor", "not specified - default to grey\" args.colors = [get_color(\"grey\") for t in args.celltypes]", "tau = tau.reshape(dim, order='F') show_idx = np.unique(sigma[tau == show_tau]) points = vtk.vtkPoints() for", "\"Number of colors does not match number of cell types - default to", "= 
[get_color(\"grey\") for t in args.celltypes] else: args.colors = [get_color(c) for c in", "types (will NOT be updated during animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1, 1, 1),", "folder\") parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2, help=\"window size\", default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\",", "[get_color(args.colors[0]) for t in args.celltypes] elif len(args.colors) < len(args.celltypes): print \"Number of colors", "class vtkTimerCallback(): def __init__(self, update_func, tmax=1, save=False): self.timer_count = 0 self.update = update_func", "not len(tau)): tau_colors = [(.5, .5, .5) for t in tau] if (tau_alpha", "nargs=\"*\", help=\"opacity of the cell types\") parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static cell types (will", "get_color(color) else: color = (0.5, 0.5, 0.5) dim = stepdata.GetDimensions() sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id'))", "import numpy_support as VN from matplotlib import colors found_im2movie = True try: from", "imprefix, step)) print 'save image {}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write() return actors def animate(self,", "Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem), bnd_colors=bnd) # start animation", "of cell types - default to grey\" args.colors = [get_color(\"grey\") for t in", "_load_data(self, fn): \"\"\" Load vtk files \"\"\" reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update()", ":param bbox: show bounding box :param tau_alpha: list with opacity per cell type", "None: static_tau = [] update_tau = [t for t in tau if t", "return parser.parse_args() def main(): args = parse_args() # check if there is something", "print \"Cell color not specified - default to grey\" 
args.colors = [get_color(\"grey\") for", "{self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} # setup renderer self._set_renderer(winsize, bg) def _get_step(self,step): \"\"\" Retrieve vtk", "movie if args.movie and found_im2movie: if args.moviedir is None: args.moviedir = args.outdir if", "= sigma.reshape(dim, order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim, order='F') show_idx = np.unique(sigma[tau", "cell type :param tau_alpha: list with opacity per cell type :param bbox: show", "lambda fn: int(fn.split('_')[-1].replace('.vtk', '')) if steps is not None: self.files = {get_num(f) :", "import numpy as np import vtk from vtk.util import numpy_support as VN from", "types\") parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static cell types (will NOT be updated during animation)\")", "name \"\"\" cc = colors.ColorConverter() if name in colors.cnames: return cc.to_rgb(name) else: return", "actors for tau_list followed by the bounding box (if applicable) \"\"\" # set", "# get bounding box wire frame if bbox: actors.append(self._get_box_actor()) if bnd is not", "is not None: for tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add actors to the", "= actors self.timer_count += 1 class Visualizer3D(): \"\"\" Create visualizer object :param simdir:", "vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper) return actor def _load_data(self, fn): \"\"\" Load", "if bbox: actors.append(self._get_box_actor()) if bnd is not None: for tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color))", "default to grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] elif len(args.colors) ==", "args.imprefix: args.imprefix = \"frame\" # set camera cam_props = {'position': args.camposition, 'focal point':", "else: if self.storeafterread: self.data[step] = 
self._load_data(self.files[step]) return self.data[step] else: return self._load_data(self.files[step]) def _set_renderer(self,", "during the animation \"\"\" if (tau_colors is None) or (len(tau_colors) is not len(tau)):", "points.InsertNextPoint(f*w,0,d) elif 'y' in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z' in tp:", "show bounding box :param tau_alpha: list with opacity per cell type :param tau_colors:", "for tau_list followed by the bounding box (if applicable) \"\"\" # set default", "'y' in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z' in tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d)", "\"Number of alpha values does not match number of cell types - default", "[] else: actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for i, tau in enumerate(tau_list)]", "cell types :param show: initialize and start the render window after adding the", "' + str(int(step))) actors = self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render() if self.cam_props", "to animate if not os.path.isdir(args.simdir): sys.exit(\"Could not find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0:", "steps.sort() self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0], tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau", "-> skip file\".format(fn) return None if data.GetPointData().HasArray('cell.type') != 1: print \"'cell.id' array missing", "= vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper) return actor", "they are not specified 
if tau_colors is None: tau_colors = [(0.5, 0.5, 0.5)", "opacity per cell type :param steps: steps (all steps are shown when not", "required=True) parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\", help=\"colors or the cell types\") parser.add_argument(\"-a\", \"--alpha\", type=float,", "focal point are not given, they will be taken from the camera in", "actors = self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render() if self.cam_props is not None:", "if get_num(f) in steps} else: self.files = {get_num(f) : f for f in", "actors: self.renderer.AddActor(actor) return actors def _modify_cam(self): \"\"\" Modify the camera settings for the", "self.timer_count = 0 self.update = update_func self.tmax = tmax self.update_actors = None self.save", "\"\"\" Create and return actor for wire frame box of the simulation domain", "value t = self.timer_count if self.timer_count >= self.tmax: t = self.timer_count % self.tmax", "None: tau_alpha = [1 for tau in tau_list] # get actors stepdata =", "actors = self.update(t, self.save) self.update_actors = actors self.timer_count += 1 class Visualizer3D(): \"\"\"", "in tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a", "reader.GetOutput() def visualize(self, step, tau_list, show=False, save=False, impath=None, imprefix=None, bbox=True, tau_alpha=None, tau_colors=None, bnd=None):", "cc.to_rgb(name) else: return cc.to_rgb(\"grey\") def parse_args(): parser = argparse.ArgumentParser() # parser.description(\"Animate 3D Morpheus", "actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2]) return actor def _get_box_actor(self): \"\"\" Create", "self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if 'focal point' in self.cam_props: 
cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint())", "# set t to correct value t = self.timer_count if self.timer_count >= self.tmax:", "imprefix is not None and imprefix.endswith('_'): imprefix = imprefix + '_' if imprefix", "update_func, tmax=1, save=False): self.timer_count = 0 self.update = update_func self.tmax = tmax self.update_actors", "reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data = reader.GetOutput() if data.GetPointData().HasArray('cell.id') != 1: print \"'cell.id' array", "data on the fly instead of all at once \"\"\" def __init__(self, simdir,", "\"--fps\", type=float, default=5, help=\"frames per second\") parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\",", "for a specific step \"\"\" if step in self.data: return self.data[step] else: if", "cb.update_actors = [] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId = timerId", "if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save: w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer =", "list of actors with first the actors for tau_list followed by the bounding", "args.celltypes] elif len(args.colors) < len(args.celltypes): print \"Number of colors does not match number", "set default colors and opacity when they are not specified if tau_colors is", "self.renderer.GetActiveCamera(); cam = vtk.vtkCamera() if 'position' in self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if 'focal", "is None: args.moviename = args.imprefix makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir, args.fps, win=args.win, tomp4=args.mp4)", "to opaque objects\" args.alpha = [1 for t in args.celltypes] bnd = {}", "name\") 
parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all data at once before the visualization starts\") parser.add_argument(\"--savemem\",", "nargs=3, default=(-200, 200, 200), help=\"camera position\") parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100, 100, 50), help=\"camera", "return self._load_data(self.files[step]) def _set_renderer(self, winsize, bg): \"\"\" Set up vtk renderer \"\"\" self.renderer", "= get_color(color) else: color = (0.5, 0.5, 0.5) dim = stepdata.GetDimensions() sigma =", "(all steps are shown when not specified) :param save: save view to png", ":param step: step to visualize :param tau_list: list of cell types :param tau_colors:", "self._set_renderer(winsize, bg) def _get_step(self,step): \"\"\" Retrieve vtk data for a specific step \"\"\"", "import makeMovie except ImportError: found_im2movie = False __author__ = \"<NAME>\" __copyright__ = \"Copyright", "types\") parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\", help=\"opacity of the cell types\") parser.add_argument(\"--static\", type=int, nargs=\"*\",", "c in args.colors] if not args.alpha: print \"Alpha values not specified - default", "in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps} else: self.files = {get_num(f) : f for", "enumerate(tau) if t in update_tau] update_func = lambda t, s: self.visualize(steps[t], update_tau, show=False,", "parser.add_argument(\"-f\", \"--fps\", type=float, default=5, help=\"frames per second\") parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output directory\") parser.add_argument(\"-p\",", "win=args.win, tomp4=args.mp4) elif not found_im2movie: print \"WARNING: Movie generation is turned of because", "type=str, nargs=\"*\", help=\"colors or the cell types\") parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\", help=\"opacity of", "is not None: bnd['x'] = args.color_xmax if args.color_ymax is not None: bnd['y'] =", "help=\"cell types to animate\", 
required=True) parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\", help=\"colors or the cell", "parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\", help=\"cell types to animate\", required=True) parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\",", "closing the visualization window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie name\") parser.add_argument(\"--readall\",", "'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print 'add boundary for {} with", "- .5, p[2] - .5) polydata = vtk.vtkPolyData() polydata.SetPoints(points) sources = vtk.vtkCubeSource() sources.Update()", "windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3)", "actors that will be updated for actor in self.update_actors: ren.RemoveActor(actor) # set t", "return self.data[step] else: if self.storeafterread: self.data[step] = self._load_data(self.files[step]) return self.data[step] else: return self._load_data(self.files[step])", "_modify_cam(self): \"\"\" Modify the camera settings for the renderer. 
Available options: - position", "len(args.celltypes): print \"Number of colors does not match number of cell types -", "stepdata = self._get_step(step) if stepdata is None: return [] else: actors = [self._get_actor_for_tau(stepdata,", "type=float, nargs=3, default=(0, 0, 0), help=\"background color\") parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200, 200, 200),", ":param impath: path to store image :param imprefix: image prefix :param fps: frames", "self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1])", "steps} else: self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir))} if not", "polydata.SetPoints(points) sources = vtk.vtkCubeSource() sources.Update() glyph = vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata)", "args.moviedir is None: args.moviedir = args.outdir if args.moviename is None: args.moviename = args.imprefix", ":param cam_props: dictionary with camera settings :param onthefly: read data on the fly", "def _load_data(self, fn): \"\"\" Load vtk files \"\"\" reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn()", "and opacity if not args.colors: print \"Cell color not specified - default to", "in glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly: self.data = {n : self._load_data(f) for n,f in", "self.storeafterread = storeafterread self.bnd_colors = bnd_colors # read data get_num = lambda fn:", "not specified) :param save: save view to png :param impath: path to store", "show_idx = np.unique(sigma[tau == show_tau]) points = vtk.vtkPoints() for s in show_idx: if", "results :param tau: list of cell types :param tau_colors: list with color per", "numpy as np import vtk from vtk.util import 
numpy_support as VN from matplotlib", "# parser.add_argument(\"--campitch\", type=float, default=, help=\"camera pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps to animate, all", "timer self.renderWindowInteractor.Start() def get_color(name): \"\"\" Get color for matplotlib color name \"\"\" cc", "parse_args() # check if there is something to animate if not os.path.isdir(args.simdir): sys.exit(\"Could", "Create visualizer object :param simdir: path to folder containing vtk files :param steps:", "except ImportError: found_im2movie = False __author__ = \"<NAME>\" __copyright__ = \"Copyright 2016\" __credits__", "update_colors = [tau_colors[i] for i, t in enumerate(tau) if t in update_tau] update_alpha", "vtkTimerCallback takes care of updating the visualzation class vtkTimerCallback(): def __init__(self, update_func, tmax=1,", "list of polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a PolyData polygonPolyData =", "show=False, save=s, bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath) cb = vtkTimerCallback(update_func, len(steps), save) if", "self.renderWindowInteractor.Start() def get_color(name): \"\"\" Get color for matplotlib color name \"\"\" cc =", "not args.imprefix: args.imprefix = \"frame\" # set camera cam_props = {'position': args.camposition, 'focal", "if args.color_ymax is not None: bnd['y'] = args.color_ymax if args.color_zmax is not None:", "tau_colors[i], tau_alpha[i]) for i, tau in enumerate(tau_list)] # get bounding box wire frame", "= vtk.vtkCubeSource() sources.Update() glyph = vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata)", "None: impath = '.' 
writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print 'save image {}/{}{:03d}.png'.format(impath, imprefix, step)", "= update_func self.tmax = tmax self.update_actors = None self.save = save def execute(self,", "set camera cam_props = {'position': args.camposition, 'focal point': args.camfocus} # create visualizer v", "stepdata, show_tau, color=(0.5, 0.5, 0.5), opacity=1): \"\"\" Create actor for a cell type", "tau] if (tau_alpha is None) or (len(tau_alpha) is not len(tau)): tau_alpha = [1", "tmax self.update_actors = None self.save = save def execute(self, obj, event): iren =", "else: mapper.SetInputData(imageData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor def", "with opacity per cell type :param bbox: show bounding box :returns: list of", "vtk files :param steps: steps to visualize :param winsize: window size :param bg:", "return self.data[step] else: return self._load_data(self.files[step]) def _set_renderer(self, winsize, bg): \"\"\" Set up vtk", "import vtk from vtk.util import numpy_support as VN from matplotlib import colors found_im2movie", "args.alpha = [1 for t in args.celltypes] elif len(args.alpha) == 1: args.alpha =", "0: bnd = {} # set saving options if args.imprefix or args.outdir or", "= cam_props self.storeafterread = storeafterread self.bnd_colors = bnd_colors # read data get_num =", "args.color_zmax if args.color_xmin is not None: bnd['-x'] = args.color_xmin if args.color_ymin is not", "args.moviename, args.outdir, args.moviedir, args.fps, win=args.win, tomp4=args.mp4) elif not found_im2movie: print \"WARNING: Movie generation", "vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2])", "NOT be updated 
during animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1, 1, 1), help=\"bounding box", "in args.celltypes] else: args.colors = [get_color(c) for c in args.colors] if not args.alpha:", "in pix: points.InsertNextPoint(p[0] - .5, p[1] - .5, p[2] - .5) polydata =", "in args.celltypes] bnd = {} if args.color_xmax is not None: bnd['x'] = args.color_xmax", "elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No vtk files found in {}\".format(args.simdir)) # set colors", "= bnd_colors # read data get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk', '')) if steps", "writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is not None and imprefix.endswith('_'): imprefix = imprefix + '_'", "len(args.colors) < len(args.celltypes): print \"Number of colors does not match number of cell", "f in glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly: self.data = {n : self._load_data(f) for n,f", "for tau in tau_list] # get actors stepdata = self._get_step(step) if stepdata is", "cam_props = {'position': args.camposition, 'focal point': args.camfocus} # create visualizer v = Visualizer3D(args.simdir,", "actor.SetMapper(mapper) return actor def _load_data(self, fn): \"\"\" Load vtk files \"\"\" reader =", "actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper) return actor def _load_data(self, fn): \"\"\" Load vtk", "args.color_zmax is not None: bnd['z'] = args.color_zmax if args.color_xmin is not None: bnd['-x']", "tau_colors: list with color per cell type \"\"\" self.renderWindow.SetWindowName('step ' + str(int(step))) actors", "bounding box (if applicable) \"\"\" # set default colors and opacity when they", "cell types that should not be updated during the animation \"\"\" if (tau_colors", "a cell type \"\"\" if isinstance(color, basestring): # convert color to rgb string", "in args.colors] if not args.alpha: print \"Alpha values not specified - 
default to", "if len(bnd) == 0: bnd = {} # set saving options if args.imprefix", "in static_tau] update_colors = [tau_colors[i] for i, t in enumerate(tau) if t in", "in self.update_actors: ren.RemoveActor(actor) # set t to correct value t = self.timer_count if", "{}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No vtk files found in {}\".format(args.simdir)) # set", "args.outdir if args.moviename is None: args.moviename = args.imprefix makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir,", "args.fps, win=args.win, tomp4=args.mp4) elif not found_im2movie: print \"WARNING: Movie generation is turned of", "t to correct value t = self.timer_count if self.timer_count >= self.tmax: t =", "is not None and imprefix.endswith('_'): imprefix = imprefix + '_' if imprefix is", "of all at once \"\"\" def __init__(self, simdir, steps=None, winsize=(800, 800), bg=(0, 0,", "w2i.SetInput(self.renderWindow) w2i.Update() writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is not None and imprefix.endswith('_'):", "impath=None, imprefix=None, bbox=True, tau_alpha=None, tau_colors=None, bnd=None): \"\"\" Visualize a given step. 
:param step:", ":param tau: list of cell types :param tau_colors: list with color per cell", "and store movie if args.movie and found_im2movie: if args.moviedir is None: args.moviedir =", "imprefix=None, fps=5, static_tau=None): \"\"\" Animate simulation results :param tau: list of cell types", "= [1 for t in args.celltypes] elif len(args.alpha) == 1: args.alpha = [args.alpha", "# start animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static) #", "on a cubic lattice Built specifically to visualize the VTK files created by", "\"<NAME>\" __license__ = \"MIT\" __version__ = \"0.1\" __maintainer__ = \"<NAME>\" # the vtkTimerCallback", ": f for f in glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly: self.data = {n :", "cell type :param bbox: show bounding box :returns: list of actors with first", "def _get_step(self,step): \"\"\" Retrieve vtk data for a specific step \"\"\" if step", "self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId = timerId # start the", "the renderer. 
Available options: - position - focal point - pitch If position", "tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static) # create and store movie", "bnd = {} if args.color_xmax is not None: bnd['x'] = args.color_xmax if args.color_ymax", "actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper) return actor def _load_data(self, fn):", "{} -> skip file\".format(fn) return None if data.GetPointData().HasArray('cell.type') != 1: print \"'cell.id' array", "colors and opacity when they are not specified if tau_colors is None: tau_colors", "steps are shown when not specified) :param save: save view to png :param", "per cell type :param tau_alpha: list with opacity per cell type :param bbox:", "directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all data at once before", "imprefix is None: imprefix = '' if impath is None: impath = '.'", "points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y' in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif", "if (tau_alpha is None) or (len(tau_alpha) is not len(tau)): tau_alpha = [1 for", "in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add actors to the renderer for actor in actors:", "1: print \"'cell.id' array missing from {} -> skip file\".format(fn) return None return", "parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3) 
parser.add_argument(\"--color_xmax\",type=float, nargs=3)", "= True try: from im2movie import makeMovie except ImportError: found_im2movie = False __author__", "pitch If position and focal point are not given, they will be taken", "prefix :param bbox: show bounding box :param tau_alpha: list with opacity per cell", "parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all data", "get bounding box wire frame if bbox: actors.append(self._get_box_actor()) if bnd is not None:", "bnd['-x'] = args.color_xmin if args.color_ymin is not None: bnd['-y'] = args.color_ymin if args.color_zmin", "instead of \" \"keeping it in memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie windows compatible\")", "imageData.SetOrigin(0, 0, 0) mapper = vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData)", "[get_color(c) for c in args.colors] if not args.alpha: print \"Alpha values not specified", "for s in show_idx: if s not in sigma: continue pix = np.column_stack(np.where(sigma", "\"\"\" if step in self.data: return self.data[step] else: if self.storeafterread: self.data[step] = self._load_data(self.files[step])", "[t for t in tau if t not in static_tau] update_colors = [tau_colors[i]", "# convert color to rgb string if color in colors.cnames: color = get_color(color)", "== 1: args.colors = [get_color(args.colors[0]) for t in args.celltypes] elif len(args.colors) < len(args.celltypes):", "glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps} else: self.files = {get_num(f) : f for f", "if there is something to animate if not os.path.isdir(args.simdir): sys.exit(\"Could not find {}\".format(args.simdir))", "remove all actors that will be updated for actor in self.update_actors: ren.RemoveActor(actor) #", "opacity per cell type 
:param tau_colors: list with color per cell type \"\"\"", "print \"WARNING: Movie generation is turned of because im2movie was not found\" if", "a cubic lattice Built specifically to visualize the VTK files created by Morpheus", "[(.5, .5, .5) for t in tau] if (tau_alpha is None) or (len(tau_alpha)", "vtk.vtkCubeSource() sources.Update() glyph = vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort())", "- default to grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] else: args.colors", "to the renderer for actor in actors: self.renderer.AddActor(actor) return actors def _modify_cam(self): \"\"\"", "points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z' in tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon()", "glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly: self.data = {n : self._load_data(f) for n,f in self.files.iteritems()}", "0) mapper = vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor =", ":param imprefix: image prefix :param fps: frames per second :param static_tau: static cell", "actors for a list of cell types and add them to the renderer", "color = get_color(color) else: color = (0.5, 0.5, 0.5) dim = stepdata.GetDimensions() sigma", "= vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a mapper and actor mapper = vtk.vtkPolyDataMapper()", "args.moviename = args.imprefix makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir, args.fps, win=args.win, tomp4=args.mp4) elif not", ":param tau_alpha: list with opacity per cell type :param tau_colors: list with color", "= \"frame\" # set camera cam_props = {'position': args.camposition, 'focal point': args.camfocus} #", "tau 
in enumerate(tau_list)] # get bounding box wire frame if bbox: actors.append(self._get_box_actor()) if", "makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir, args.fps, win=args.win, tomp4=args.mp4) elif not found_im2movie: print \"WARNING:", "self.data[step] = self._load_data(self.files[step]) return self.data[step] else: return self._load_data(self.files[step]) def _set_renderer(self, winsize, bg): \"\"\"", "if args.imprefix or args.outdir or args.movie: args.saveim = True if args.saveim: if not", "self.update_actors = actors self.timer_count += 1 class Visualizer3D(): \"\"\" Create visualizer object :param", "self._load_data(self.files[step]) def _set_renderer(self, winsize, bg): \"\"\" Set up vtk renderer \"\"\" self.renderer =", "if this \" \"is not specified\") parser.add_argument(\"-f\", \"--fps\", type=float, default=5, help=\"frames per second\")", "bnd['z'] = args.color_zmax if args.color_xmin is not None: bnd['-x'] = args.color_xmin if args.color_ymin", "vtk.vtkPoints() f = 0 if '-' in tp else 1 if 'x' in", "obj win = iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer() # remove all actors that will", ":param step: step to visualize :param tau_list: list of cell types :param show:", "help=\"reread vtk file every time it is used instead of \" \"keeping it", "types :param tau_colors: list with color per cell type :param tau_alpha: list with", "args.color_ymin if args.color_zmin is not None: bnd['-z'] = args.color_zmin if len(bnd) == 0:", "makeMovie except ImportError: found_im2movie = False __author__ = \"<NAME>\" __copyright__ = \"Copyright 2016\"", "bounding box :param tau_alpha: list with opacity per cell type :param tau_colors: list", "cell type :param tau_colors: list with color per cell type \"\"\" self.renderWindow.SetWindowName('step '", "boundary for {} with color {}'.format(tp,color) (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() points =", "imprefix, step) writer.Write() return actors def 
animate(self, tau, tau_colors=None, tau_alpha=None, steps=None, save=False, impath=None,", "= [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for i, tau in enumerate(tau_list)] # get bounding", "color for matplotlib color name \"\"\" cc = colors.ColorConverter() if name in colors.cnames:", "self.update_actors = None self.save = save def execute(self, obj, event): iren = obj", "self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps}", "= vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor def _get_actor_for_tau(self, stepdata, show_tau,", "iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer() # remove all actors that will be updated for", "print \"Number of alpha values does not match number of cell types -", "Retrieve vtk data for a specific step \"\"\" if step in self.data: return", "None: self._modify_cam() if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save: w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update()", "a specific step \"\"\" if step in self.data: return self.data[step] else: if self.storeafterread:", "type=str, help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save images\")", "bnd['x'] = args.color_xmax if args.color_ymax is not None: bnd['y'] = args.color_ymax if args.color_zmax", "basestring): # convert color to rgb string if color in colors.cnames: color =", "tau in tau_list] # get actors stepdata = self._get_step(step) if stepdata is None:", "is something to animate if not os.path.isdir(args.simdir): sys.exit(\"Could not find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir)))", "elif 'z' in tp: 
points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) #", "steps to visualize :param winsize: window size :param bg: background color :param bbox_color:", "mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor()", "[tau_alpha[i] for i, t in enumerate(tau) if t in update_tau] update_func = lambda", "not None and imprefix.endswith('_'): imprefix = imprefix + '_' if imprefix is None:", "from im2movie import makeMovie except ImportError: found_im2movie = False __author__ = \"<NAME>\" __copyright__", "args.color_xmax is not None: bnd['x'] = args.color_xmax if args.color_ymax is not None: bnd['y']", "not None: bnd['-x'] = args.color_xmin if args.color_ymin is not None: bnd['-y'] = args.color_ymin", ":param fps: frames per second :param static_tau: static cell types that should not", "to the renderer :param step: step to visualize :param tau_list: list of cell", "parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk file every time it is used instead of \"", "return actors def animate(self, tau, tau_colors=None, tau_alpha=None, steps=None, save=False, impath=None, imprefix=None, fps=5, static_tau=None):", "is turned of because im2movie was not found\" if __name__ == \"__main__\": main()", "elif len(args.colors) == 1: args.colors = [get_color(args.colors[0]) for t in args.celltypes] elif len(args.colors)", "for actor in self.update_actors: ren.RemoveActor(actor) # set t to correct value t =", "1: args.alpha = [args.alpha for t in args.celltypes] elif len(args.alpha) < len(args.celltypes): print", "if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1],", 
"< len(args.celltypes): print \"Number of alpha values does not match number of cell", "of polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a PolyData polygonPolyData = vtk.vtkPolyData()", "wire frame if bbox: actors.append(self._get_box_actor()) if bnd is not None: for tp,color in", "{} with color {}'.format(tp,color) (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints() f", "obj, event): iren = obj win = iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer() # remove", "mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2]) return actor def _get_box_actor(self): \"\"\"", "colors found_im2movie = True try: from im2movie import makeMovie except ImportError: found_im2movie =", "if '-' in tp else 1 if 'x' in tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d)", "color per cell type :param tau_alpha: list with opacity per cell type :param", "actor in actors: self.renderer.AddActor(actor) return actors def _modify_cam(self): \"\"\" Modify the camera settings", "color[1], color[2]) return actor def _get_box_actor(self): \"\"\" Create and return actor for wire", "points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a quad polygon.GetPointIds().SetId(0, 0)", "args.imprefix makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir, args.fps, win=args.win, tomp4=args.mp4) elif not found_im2movie: print", "bnd_colors=None): self.bbox_color = bbox_color self.cam_props = cam_props self.storeafterread = storeafterread self.bnd_colors = bnd_colors", "dictionary with camera settings :param onthefly: read data on the fly instead of", "vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData) else: 
mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0],", "will be shown if this \" \"is not specified\") parser.add_argument(\"-f\", \"--fps\", type=float, default=5,", "the renderer. :param renderer: vtk renderer :param cam_props: dictionary with options (see above)", "= None self.save = save def execute(self, obj, event): iren = obj win", "0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) # Add the polygon to a", "- .5, p[1] - .5, p[2] - .5) polydata = vtk.vtkPolyData() polydata.SetPoints(points) sources", "quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) # Add the polygon", "actor.GetProperty().SetColor(color[0], color[1], color[2]) return actor def _get_box_actor(self): \"\"\" Create and return actor for", "imprefix=imprefix, impath=impath) cb = vtkTimerCallback(update_func, len(steps), save) if len(actors) > 0: cb.update_actors =", "= iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer() # remove all actors that will be updated", "action=\"store_true\", help=\"make movie windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float,", "cam = vtk.vtkCamera() if 'position' in self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if 'focal point'", "1: print \"'cell.id' array missing from {} -> skip file\".format(fn) return None if", "actors self.timer_count += 1 class Visualizer3D(): \"\"\" Create visualizer object :param simdir: path", "= args.color_xmax if args.color_ymax is not None: bnd['y'] = args.color_ymax if args.color_zmax is", "= [] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = 
self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId = timerId #", "types to animate\", required=True) parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\", help=\"colors or the cell types\")", "# Create a PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a mapper", "point': args.camfocus} # create visualizer v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not", "f for f in glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly: self.data = {n : self._load_data(f)", "enumerate(tau_list)] # get bounding box wire frame if bbox: actors.append(self._get_box_actor()) if bnd is", "show=False, save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is None: static_tau = [] update_tau", "for t in tau if t not in static_tau] else: cb.update_actors = []", "something to animate if not os.path.isdir(args.simdir): sys.exit(\"Could not find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) ==", ":param steps: steps (all steps are shown when not specified) :param save: save", "updated for actor in self.update_actors: ren.RemoveActor(actor) # set t to correct value t", "for t in tau if t not in static_tau] update_colors = [tau_colors[i] for", "self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId = timerId # start the interaction and timer self.renderWindowInteractor.Start()", "__author__ = \"<NAME>\" __copyright__ = \"Copyright 2016\" __credits__ = \"<NAME>\" __license__ = \"MIT\"", "= args.simdir if not os.path.isdir(args.outdir): print \"Create output directory {}\".format(args.outdir) os.makedirs(args.outdir) if not", "cell types and add them to the renderer :param step: step to visualize", "- position - focal point - pitch If position and focal point are", "= 
Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem), bnd_colors=bnd) # start", "cell types :param tau_colors: list with color per cell type :param tau_alpha: list", "self.renderer.AddActor(actor) return actors def _modify_cam(self): \"\"\" Modify the camera settings for the renderer.", "t in enumerate(tau) if t in update_tau] update_alpha = [tau_alpha[i] for i, t", "# parser.description(\"Animate 3D Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\",", ">= self.tmax: t = self.timer_count % self.tmax self.save = False # get new", "= \"<NAME>\" __license__ = \"MIT\" __version__ = \"0.1\" __maintainer__ = \"<NAME>\" # the", "focal point - pitch If position and focal point are not given, they", "type=str, help=\"movie directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all data at", "args.moviename is None: args.moviename = args.imprefix makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir, args.fps, win=args.win,", "with first the actors for tau_list followed by the bounding box (if applicable)", "{}\".format(args.outdir) os.makedirs(args.outdir) if not args.imprefix: args.imprefix = \"frame\" # set camera cam_props =", "steps will be shown if this \" \"is not specified\") parser.add_argument(\"-f\", \"--fps\", type=float,", "vtk from vtk.util import numpy_support as VN from matplotlib import colors found_im2movie =", "iren = obj win = iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer() # remove all actors", "- .5) polydata = vtk.vtkPolyData() polydata.SetPoints(points) sources = vtk.vtkCubeSource() sources.Update() glyph = vtk.vtkGlyph3D()", "with opacity per cell type :param steps: steps (all steps are 
shown when", "actors to the renderer, should not be used for animations :param save: save", "tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath) cb = vtkTimerCallback(update_func, len(steps), save) if len(actors) > 0:", "\"\"\" if (tau_colors is None) or (len(tau_colors) is not len(tau)): tau_colors = [(.5,", "impath=impath) cb = vtkTimerCallback(update_func, len(steps), save) if len(actors) > 0: cb.update_actors = [actors[tau.index(t)]", "args.saveim: if not args.outdir: args.outdir = args.simdir if not os.path.isdir(args.outdir): print \"Create output", "of alpha values does not match number of cell types - default to", "= [1 for t in args.celltypes] bnd = {} if args.color_xmax is not", "1 class Visualizer3D(): \"\"\" Create visualizer object :param simdir: path to folder containing", "self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5, 0.5), opacity=1): \"\"\"", "points = vtk.vtkPoints() for s in show_idx: if s not in sigma: continue", "if data.GetPointData().HasArray('cell.id') != 1: print \"'cell.id' array missing from {} -> skip file\".format(fn)", "def _get_box_actor(self): \"\"\" Create and return actor for wire frame box of the", "wire frame color :param cam_props: dictionary with camera settings :param onthefly: read data", "= {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly: self.data =", "mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper) return actor def _load_data(self,", "\"\"\" Create visualizer object :param simdir: path to folder containing vtk files :param", "= [tau_alpha[i] for i, t in enumerate(tau) if t in update_tau] update_func =", "str(int(step))) actors = self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd) 
self.renderWindow.Render() if self.cam_props is not", "execute(self, obj, event): iren = obj win = iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer() #", "if 'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print 'add boundary for {}", "tau_colors, tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render() if self.cam_props is not None: self._modify_cam() if show: self.renderWindowInteractor.Initialize()", "with camera settings :param onthefly: read data on the fly instead of all", "t in tau if t not in static_tau] else: cb.update_actors = [] self.renderWindowInteractor.AddObserver('TimerEvent',", "imprefix: image prefix :param bbox: show bounding box :param tau_alpha: list with opacity", "winsize: window size :param bg: background color :param bbox_color: bounding box wire frame", "impath = '.' writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print 'save image {}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write()", "self.data: return self.data[step] else: if self.storeafterread: self.data[step] = self._load_data(self.files[step]) return self.data[step] else: return", "winsize[1]) def get_actors(self, step, tau_list, tau_colors=None, tau_alpha=None, bbox=True, bnd=None): \"\"\" Create actors for", "self.data[step] else: if self.storeafterread: self.data[step] = self._load_data(self.files[step]) return self.data[step] else: return self._load_data(self.files[step]) def", "Add the polygon to a list of polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) #", "None self.save = save def execute(self, obj, event): iren = obj win =", "opacity when they are not specified if tau_colors is None: tau_colors = [(0.5,", "steps is None: steps = self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0], tau, show=False,", "default to opaque objects\" args.alpha = [1 for t in args.celltypes] 
elif len(args.alpha)", "= self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId = timerId # start the interaction and timer", "args.color_ymin is not None: bnd['-y'] = args.color_ymin if args.color_zmin is not None: bnd['-z']", "stepdata is None: return [] else: actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for", "args.color_ymax if args.color_zmax is not None: bnd['z'] = args.color_zmax if args.color_xmin is not", "the bounding box (if applicable) \"\"\" # set default colors and opacity when", ": self._load_data(self.files[self.files.keys()[0]])} # setup renderer self._set_renderer(winsize, bg) def _get_step(self,step): \"\"\" Retrieve vtk data", "the cell types\") parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\", help=\"opacity of the cell types\") parser.add_argument(\"--static\",", "bnd is not None: for tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add actors to", "set saving options if args.imprefix or args.outdir or args.movie: args.saveim = True if", "color per cell type \"\"\" self.renderWindow.SetWindowName('step ' + str(int(step))) actors = self.get_actors(step, tau_list,", "self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print 'add boundary for {} with color {}'.format(tp,color) (w, h,", "(len(tau_alpha) is not len(tau)): tau_alpha = [1 for t in tau] if steps", "specified - default to grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] elif", "opaque objects\" args.alpha = [1 for t in args.celltypes] bnd = {} if", "len(bnd) == 0: bnd = {} # set saving options if args.imprefix or", "> 0: cb.update_actors = [actors[tau.index(t)] for t in tau if t not in", "\"\"\" Set up vtk renderer \"\"\" self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow", "is None: return [] else: actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for i,", "p[1] - .5, 
p[2] - .5) polydata = vtk.vtkPolyData() polydata.SetPoints(points) sources = vtk.vtkCubeSource()", "\"<NAME>\" __copyright__ = \"Copyright 2016\" __credits__ = \"<NAME>\" __license__ = \"MIT\" __version__ =", "cb.update_actors = [actors[tau.index(t)] for t in tau if t not in static_tau] else:", "keys and settings as values \"\"\" old_cam = self.renderer.GetActiveCamera(); cam = vtk.vtkCamera() if", "args.celltypes] else: args.colors = [get_color(c) for c in args.colors] if not args.alpha: print", "- pitch If position and focal point are not given, they will be", "is used instead of \" \"keeping it in memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie", "# set colors and opacity if not args.colors: print \"Cell color not specified", "are not given, they will be taken from the camera in the renderer.", "self.bnd_colors = bnd_colors # read data get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk', '')) if", "output directory {}\".format(args.outdir) os.makedirs(args.outdir) if not args.imprefix: args.imprefix = \"frame\" # set camera", ".5, p[1] - .5, p[2] - .5) polydata = vtk.vtkPolyData() polydata.SetPoints(points) sources =", "elif len(args.alpha) == 1: args.alpha = [args.alpha for t in args.celltypes] elif len(args.alpha)", "= self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData() imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w, h, d) imageData.SetOrigin(0, 0,", "+ '_' if imprefix is None: imprefix = '' if impath is None:", "actor in self.update_actors: ren.RemoveActor(actor) # set t to correct value t = self.timer_count", "cell types\") parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static cell types (will NOT be updated during", "+ str(int(step))) actors = self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render() if self.cam_props is", "parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps to animate, all steps will be 
shown if this", "data for a specific step \"\"\" if step in self.data: return self.data[step] else:", "bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add actors to the renderer for actor in actors: self.renderer.AddActor(actor)", "in self.cam_props: cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def", "if bnd is not None: for tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add actors", "is None) or (len(tau_alpha) is not len(tau)): tau_alpha = [1 for t in", "vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3,", "self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0], tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if", "update_tau = [t for t in tau if t not in static_tau] update_colors", "None: args.moviename = args.imprefix makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir, args.fps, win=args.win, tomp4=args.mp4) elif", "def animate(self, tau, tau_colors=None, tau_alpha=None, steps=None, save=False, impath=None, imprefix=None, fps=5, static_tau=None): \"\"\" Animate", "is not None: bnd['y'] = args.color_ymax if args.color_zmax is not None: bnd['z'] =", "= \"<NAME>\" # the vtkTimerCallback takes care of updating the visualzation class vtkTimerCallback():", "animate\", required=True) parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\", help=\"colors or the cell types\") parser.add_argument(\"-a\", \"--alpha\",", "nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3) parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, 
nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args() def main():", "= args.color_ymax if args.color_zmax is not None: bnd['z'] = args.color_zmax if args.color_xmin is", "parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0, 0, 0), help=\"background color\") parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200, 200,", "else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity)", "timerId # start the interaction and timer self.renderWindowInteractor.Start() def get_color(name): \"\"\" Get color", "is None: imprefix = '' if impath is None: impath = '.' writer.SetFileName('{}/{}{:03d}.png'.format(impath,", "the renderer for actor in actors: self.renderer.AddActor(actor) return actors def _modify_cam(self): \"\"\" Modify", "vtk.vtkPoints() for s in show_idx: if s not in sigma: continue pix =", "argparse import numpy as np import vtk from vtk.util import numpy_support as VN", "list with opacity per cell type :param steps: steps (all steps are shown", "\"\"\" reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data = reader.GetOutput() if data.GetPointData().HasArray('cell.id') !=", "if (tau_colors is None) or (len(tau_colors) is not len(tau)): tau_colors = [(.5, .5,", "args.moviedir = args.outdir if args.moviename is None: args.moviename = args.imprefix makeMovie(args.imprefix, 'png', args.moviename,", "type :param tau_alpha: list with opacity per cell type :param bbox: show bounding", "size :param bg: background color :param bbox_color: bounding box wire frame color :param", "self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print 'add boundary for {} with color 
{}'.format(tp,color)", "image :param imprefix: image prefix :param fps: frames per second :param static_tau: static", "not specified if tau_colors is None: tau_colors = [(0.5, 0.5, 0.5) for tau", "if not os.path.isdir(args.outdir): print \"Create output directory {}\".format(args.outdir) os.makedirs(args.outdir) if not args.imprefix: args.imprefix", "for wire frame box of the simulation domain \"\"\" (w, h, d) =", "in args.celltypes] elif len(args.alpha) == 1: args.alpha = [args.alpha for t in args.celltypes]", "return reader.GetOutput() def visualize(self, step, tau_list, show=False, save=False, impath=None, imprefix=None, bbox=True, tau_alpha=None, tau_colors=None,", "of the cell types\") parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static cell types (will NOT be", ":param renderer: vtk renderer :param cam_props: dictionary with options (see above) as keys", "= VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim, order='F') show_idx = np.unique(sigma[tau == show_tau]) points =", "imprefix=args.imprefix, fps=args.fps, static_tau=args.static) # create and store movie if args.movie and found_im2movie: if", "s: self.visualize(steps[t], update_tau, show=False, save=s, bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath) cb = vtkTimerCallback(update_func,", "\"\"\" Retrieve vtk data for a specific step \"\"\" if step in self.data:", "in tau_list] if tau_alpha is None: tau_alpha = [1 for tau in tau_list]", "else: return cc.to_rgb(\"grey\") def parse_args(): parser = argparse.ArgumentParser() # parser.description(\"Animate 3D Morpheus simulations\")", "<= 5: mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2]) return", "= [tau_colors[i] for i, t in enumerate(tau) if t in update_tau] update_alpha =", "cell types - default to grey\" args.colors = 
[get_color(\"grey\") for t in args.celltypes]", "Built specifically to visualize the VTK files created by Morpheus \"\"\" import os", "= vtk.vtkImageData() imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w, h, d) imageData.SetOrigin(0, 0, 0) mapper =", "cam_props self.storeafterread = storeafterread self.bnd_colors = bnd_colors # read data get_num = lambda", "objects\" args.alpha = [1 for t in args.celltypes] elif len(args.alpha) == 1: args.alpha", "save=False): self.timer_count = 0 self.update = update_func self.tmax = tmax self.update_actors = None", "to correct value t = self.timer_count if self.timer_count >= self.tmax: t = self.timer_count", "cell type \"\"\" self.renderWindow.SetWindowName('step ' + str(int(step))) actors = self.get_actors(step, tau_list, tau_colors, tau_alpha,", "tau_alpha[i]) for i, tau in enumerate(tau_list)] # get bounding box wire frame if", "python \"\"\"Visualizes data on a cubic lattice Built specifically to visualize the VTK", "= imprefix + '_' if imprefix is None: imprefix = '' if impath", "box :param tau_alpha: list with opacity per cell type :param tau_colors: list with", "steps (all steps are shown when not specified) :param save: save view to", "winsize=(800, 800), bg=(0, 0, 0), bbox_color=(1, 1, 1), cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color", "points = vtk.vtkPoints() f = 0 if '-' in tp else 1 if", "color[2]) actor.SetMapper(mapper) return actor def _load_data(self, fn): \"\"\" Load vtk files \"\"\" reader", "png :param impath: path to store image :param imprefix: image prefix :param bbox:", "= win.GetRenderers().GetFirstRenderer() # remove all actors that will be updated for actor in", "tmax=1, save=False): self.timer_count = 0 self.update = update_func self.tmax = tmax self.update_actors =", "png :param impath: path to store image :param imprefix: image prefix :param fps:", "f for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps} else: self.files = 
{get_num(f)", "t in tau if t not in static_tau] update_colors = [tau_colors[i] for i,", "not None: bnd['-y'] = args.color_ymin if args.color_zmin is not None: bnd['-z'] = args.color_zmin", "nargs=\"*\", help=\"static cell types (will NOT be updated during animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3,", "imprefix.endswith('_'): imprefix = imprefix + '_' if imprefix is None: imprefix = ''", "{get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly: self.data = {n", "h, d) imageData.SetOrigin(0, 0, 0) mapper = vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData)", "tau_list: list of cell types :param show: initialize and start the render window", "vtk renderer :param cam_props: dictionary with options (see above) as keys and settings", "if t in update_tau] update_func = lambda t, s: self.visualize(steps[t], update_tau, show=False, save=s,", "files created by Morpheus \"\"\" import os import glob import sys import argparse", "= self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0], tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors)", "help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\",", "args.outdir or args.movie: args.saveim = True if args.saveim: if not args.outdir: args.outdir =", "vtk.vtkPolyData() polydata.SetPoints(points) sources = vtk.vtkCubeSource() sources.Update() glyph = vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <= 5:", "is not len(tau)): tau_alpha = [1 for t in tau] if steps is", "parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args() def main(): args = parse_args() # check", "if imprefix is None: imprefix = '' if impath is 
None: impath =", "self._load_data(self.files[step]) return self.data[step] else: return self._load_data(self.files[step]) def _set_renderer(self, winsize, bg): \"\"\" Set up", "args.color_zmin if len(bnd) == 0: bnd = {} # set saving options if", "found_im2movie: print \"WARNING: Movie generation is turned of because im2movie was not found\"", "= args.imprefix makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir, args.fps, win=args.win, tomp4=args.mp4) elif not found_im2movie:", "# set camera cam_props = {'position': args.camposition, 'focal point': args.camfocus} # create visualizer", "\"--imprefix\", type=str, help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make", "cc.to_rgb(\"grey\") def parse_args(): parser = argparse.ArgumentParser() # parser.description(\"Animate 3D Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\",", "box wire frame color :param cam_props: dictionary with camera settings :param onthefly: read", "print 'save image {}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write() return actors def animate(self, tau, tau_colors=None,", "else: return self._load_data(self.files[step]) def _set_renderer(self, winsize, bg): \"\"\" Set up vtk renderer \"\"\"", "args.colors: print \"Cell color not specified - default to grey\" args.colors = [get_color(\"grey\")", "self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self, step, tau_list, tau_colors=None, tau_alpha=None,", "1), cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color = bbox_color self.cam_props = cam_props self.storeafterread =", "tau if t not in static_tau] else: cb.update_actors = [] 
self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId", "pix = np.column_stack(np.where(sigma == s)) for p in pix: points.InsertNextPoint(p[0] - .5, p[1]", "= args.color_xmin if args.color_ymin is not None: bnd['-y'] = args.color_ymin if args.color_zmin is", "types that should not be updated during the animation \"\"\" if (tau_colors is", "to rgb string if color in colors.cnames: color = get_color(color) else: color =", "True if args.saveim: if not args.outdir: args.outdir = args.simdir if not os.path.isdir(args.outdir): print", "return actor def _load_data(self, fn): \"\"\" Load vtk files \"\"\" reader = vtk.vtkStructuredPointsReader()", "found_im2movie: if args.moviedir is None: args.moviedir = args.outdir if args.moviename is None: args.moviename", "action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make movie after closing the visualization window\")", "read data get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk', '')) if steps is not None:", "values does not match number of cell types - default to opaque objects\"", "tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a quad", ":param cam_props: dictionary with options (see above) as keys and settings as values", "tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render() if self.cam_props is not None: self._modify_cam() if show:", "help=\"static cell types (will NOT be updated during animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1,", "def get_actors(self, step, tau_list, tau_colors=None, tau_alpha=None, bbox=True, bnd=None): \"\"\" Create actors for a", "parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) 
parser.add_argument(\"--color_zmin\",type=float, nargs=3) parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return", "list of cell types :param show: initialize and start the render window after", "to folder containing vtk files :param steps: steps to visualize :param winsize: window", "color\") parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200, 200, 200), help=\"camera position\") parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100,", "per cell type :param tau_alpha: list with opacity per cell type :param steps:", "imprefix = '' if impath is None: impath = '.' writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step))", "to animate\", required=True) parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\", help=\"colors or the cell types\") parser.add_argument(\"-a\",", "t not in static_tau] update_colors = [tau_colors[i] for i, t in enumerate(tau) if", "ren.RemoveActor(actor) # set t to correct value t = self.timer_count if self.timer_count >=", ":param winsize: window size :param bg: background color :param bbox_color: bounding box wire", "fn: int(fn.split('_')[-1].replace('.vtk', '')) if steps is not None: self.files = {get_num(f) : f", "bbox=True, bnd=None): \"\"\" Create actors for a list of cell types and add", "def _get_bnd_actor(self,tp,color): print 'add boundary for {} with color {}'.format(tp,color) (w, h, d)", "= vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor = vtk.vtkActor() actor.SetMapper(mapper)", "[(0.5, 0.5, 0.5) for tau in tau_list] if tau_alpha is None: tau_alpha =", "imprefix + '_' if imprefix is None: imprefix = '' if impath is", "per cell type :param bbox: show bounding box :returns: list of actors with", "a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) 
polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) # Add the", "focal point\") # parser.add_argument(\"--campitch\", type=float, default=, help=\"camera pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps to", "\"\"\" Modify the camera settings for the renderer. Available options: - position -", "help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all data at once before the visualization starts\")", "show_tau, color=(0.5, 0.5, 0.5), opacity=1): \"\"\" Create actor for a cell type \"\"\"", "per cell type :param steps: steps (all steps are shown when not specified)", "objects\" args.alpha = [1 for t in args.celltypes] bnd = {} if args.color_xmax", "If position and focal point are not given, they will be taken from", "= vtkTimerCallback(update_func, len(steps), save) if len(actors) > 0: cb.update_actors = [actors[tau.index(t)] for t", "return cc.to_rgb(name) else: return cc.to_rgb(\"grey\") def parse_args(): parser = argparse.ArgumentParser() # parser.description(\"Animate 3D", "float(fps))) cb.timerId = timerId # start the interaction and timer self.renderWindowInteractor.Start() def get_color(name):", "renderer :param step: step to visualize :param tau_list: list of cell types :param", "[1 for t in args.celltypes] elif len(args.alpha) == 1: args.alpha = [args.alpha for", "\"--celltypes\", type=int, nargs=\"*\", help=\"cell types to animate\", required=True) parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\", help=\"colors", "store image :param imprefix: image prefix :param bbox: show bounding box :param tau_alpha:", "= [t for t in tau if t not in static_tau] update_colors =", "def __init__(self, update_func, tmax=1, save=False): self.timer_count = 0 self.update = update_func self.tmax =", "points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1)", 
"polygon.GetPointIds().SetId(3, 3) # Add the polygon to a list of polygons polygons =", "{} if args.color_xmax is not None: bnd['x'] = args.color_xmax if args.color_ymax is not", "t in args.celltypes] elif len(args.colors) < len(args.celltypes): print \"Number of colors does not", "bbox_color: bounding box wire frame color :param cam_props: dictionary with camera settings :param", "\"Alpha values not specified - default to opaque objects\" args.alpha = [1 for", "if not onthefly: self.data = {n : self._load_data(f) for n,f in self.files.iteritems()} else:", "not found_im2movie: print \"WARNING: Movie generation is turned of because im2movie was not", "= {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps} else:", "actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper) return actor def _load_data(self, fn): \"\"\" Load vtk files", "\"MIT\" __version__ = \"0.1\" __maintainer__ = \"<NAME>\" # the vtkTimerCallback takes care of", "visualizer object :param simdir: path to folder containing vtk files :param steps: steps", "the visualzation class vtkTimerCallback(): def __init__(self, update_func, tmax=1, save=False): self.timer_count = 0 self.update", "[1 for t in args.celltypes] bnd = {} if args.color_xmax is not None:", "[get_color(\"grey\") for t in args.celltypes] else: args.colors = [get_color(c) for c in args.colors]", "{}\".format(args.simdir)) # set colors and opacity if not args.colors: print \"Cell color not", "parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\", help=\"opacity of the cell types\") parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static", "1, 1), cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color = bbox_color self.cam_props = cam_props self.storeafterread", "type=float, nargs=3, default=(1, 1, 1), help=\"bounding box color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0, 0,", "args.readall), 
storeafterread=(not args.savemem), bnd_colors=bnd) # start animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir,", "polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points)", "the interaction and timer self.renderWindowInteractor.Start() def get_color(name): \"\"\" Get color for matplotlib color", "help=\"frames per second\") parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image prefix\")", "cc = colors.ColorConverter() if name in colors.cnames: return cc.to_rgb(name) else: return cc.to_rgb(\"grey\") def", "{} -> skip file\".format(fn) return None return reader.GetOutput() def visualize(self, step, tau_list, show=False,", "in show_idx: if s not in sigma: continue pix = np.column_stack(np.where(sigma == s))", "args.alpha = [1 for t in args.celltypes] bnd = {} if args.color_xmax is", "return actor def _get_box_actor(self): \"\"\" Create and return actor for wire frame box", "not None: for tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add actors to the renderer", ":param impath: path to store image :param imprefix: image prefix :param bbox: show", "images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make movie after closing the visualization window\") parser.add_argument(\"--moviedir\", type=str,", "point - pitch If position and focal point are not given, they will", "visualize(self, step, tau_list, show=False, save=False, impath=None, imprefix=None, bbox=True, tau_alpha=None, tau_colors=None, bnd=None): \"\"\" Visualize", "impath is None: impath = '.' 
writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print 'save image {}/{}{:03d}.png'.format(impath,", "Movie generation is turned of because im2movie was not found\" if __name__ ==", "with opacity per cell type :param tau_colors: list with color per cell type", "'z' in tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make", "\"WARNING: Movie generation is turned of because im2movie was not found\" if __name__", "storeafterread=(not args.savemem), bnd_colors=bnd) # start animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix,", "tau, tau_colors[i], tau_alpha[i]) for i, tau in enumerate(tau_list)] # get bounding box wire", "tau_colors: list with color per cell type :param tau_alpha: list with opacity per", "list with opacity per cell type :param bbox: show bounding box :returns: list", "'png', args.moviename, args.outdir, args.moviedir, args.fps, win=args.win, tomp4=args.mp4) elif not found_im2movie: print \"WARNING: Movie", "step: step to visualize :param tau_list: list of cell types :param tau_colors: list", "self.renderWindow.Render() if self.cam_props is not None: self._modify_cam() if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save:", "in tau] if (tau_alpha is None) or (len(tau_alpha) is not len(tau)): tau_alpha =", "== 0: sys.exit(\"No vtk files found in {}\".format(args.simdir)) # set colors and opacity", "vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0],", "for n,f in 
self.files.iteritems()} else: self.data = {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} # setup renderer", "actors def animate(self, tau, tau_colors=None, tau_alpha=None, steps=None, save=False, impath=None, imprefix=None, fps=5, static_tau=None): \"\"\"", "points.InsertNextPoint(p[0] - .5, p[1] - .5, p[2] - .5) polydata = vtk.vtkPolyData() polydata.SetPoints(points)", "t in update_tau] update_alpha = [tau_alpha[i] for i, t in enumerate(tau) if t", "VTK files created by Morpheus \"\"\" import os import glob import sys import", "= [1 for tau in tau_list] # get actors stepdata = self._get_step(step) if", "else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print 'add boundary", "try: from im2movie import makeMovie except ImportError: found_im2movie = False __author__ = \"<NAME>\"", "static_tau is None: static_tau = [] update_tau = [t for t in tau", "= {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} # setup renderer self._set_renderer(winsize, bg) def _get_step(self,step): \"\"\" Retrieve", "points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z' in tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon", "{} # set saving options if args.imprefix or args.outdir or args.movie: args.saveim =", "the cell types\") parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static cell types (will NOT be updated", ":param bg: background color :param bbox_color: bounding box wire frame color :param cam_props:", "if args.saveim: if not args.outdir: args.outdir = args.simdir if not os.path.isdir(args.outdir): print \"Create", "args.imprefix = \"frame\" # set camera cam_props = {'position': args.camposition, 'focal point': 
args.camfocus}", "= VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim, order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim, order='F')", "None: imprefix = '' if impath is None: impath = '.' writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix,", "help=\"steps to animate, all steps will be shown if this \" \"is not", "bbox=True, tau_alpha=None, tau_colors=None, bnd=None): \"\"\" Visualize a given step. :param step: step to", "\"0.1\" __maintainer__ = \"<NAME>\" # the vtkTimerCallback takes care of updating the visualzation", "args.celltypes] elif len(args.colors) == 1: args.colors = [get_color(args.colors[0]) for t in args.celltypes] elif", "tau_alpha=None, tau_colors=None, bnd=None): \"\"\" Visualize a given step. :param step: step to visualize", "parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1, 1, 1), help=\"bounding box color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0,", "not be updated during the animation \"\"\" if (tau_colors is None) or (len(tau_colors)", "800)) parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\", help=\"cell types to animate\", required=True) parser.add_argument(\"-c\", \"--colors\", type=str,", "check if there is something to animate if not os.path.isdir(args.simdir): sys.exit(\"Could not find", "if self.storeafterread: self.data[step] = self._load_data(self.files[step]) return self.data[step] else: return self._load_data(self.files[step]) def _set_renderer(self, winsize,", "specified if tau_colors is None: tau_colors = [(0.5, 0.5, 0.5) for tau in", "-> skip file\".format(fn) return None return reader.GetOutput() def visualize(self, step, tau_list, show=False, save=False,", "cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color = bbox_color self.cam_props = cam_props self.storeafterread = storeafterread", "def execute(self, obj, event): iren = obj win = 
iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer()", "bnd['y'] = args.color_ymax if args.color_zmax is not None: bnd['z'] = args.color_zmax if args.color_xmin", "{'position': args.camposition, 'focal point': args.camfocus} # create visualizer v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor,", "# set default colors and opacity when they are not specified if tau_colors", "save=False, impath=None, imprefix=None, fps=5, static_tau=None): \"\"\" Animate simulation results :param tau: list of", "bbox=bbox,bnd=bnd) self.renderWindow.Render() if self.cam_props is not None: self._modify_cam() if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if", "nargs=3) return parser.parse_args() def main(): args = parse_args() # check if there is", "action=\"store_true\", help=\"make movie after closing the visualization window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\") parser.add_argument(\"--moviename\",", "args.movie: args.saveim = True if args.saveim: if not args.outdir: args.outdir = args.simdir if", ":param static_tau: static cell types that should not be updated during the animation", "visualzation class vtkTimerCallback(): def __init__(self, update_func, tmax=1, save=False): self.timer_count = 0 self.update =", "VN from matplotlib import colors found_im2movie = True try: from im2movie import makeMovie", "print \"Alpha values not specified - default to opaque objects\" args.alpha = [1", "glyph.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper)", "simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2, help=\"window size\",", "updated during the animation \"\"\" if (tau_colors is 
None) or (len(tau_colors) is not", "if save: w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix", "'add boundary for {} with color {}'.format(tp,color) (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() points", "None: args.moviedir = args.outdir if args.moviename is None: args.moviename = args.imprefix makeMovie(args.imprefix, 'png',", "and actor mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData) actor", "def parse_args(): parser = argparse.ArgumentParser() # parser.description(\"Animate 3D Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str,", "else: args.colors = [get_color(c) for c in args.colors] if not args.alpha: print \"Alpha", "save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static) # create and store movie if args.movie and", "path to store image :param imprefix: image prefix :param fps: frames per second", "glob import sys import argparse import numpy as np import vtk from vtk.util", "parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200, 200, 200), help=\"camera position\") parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100, 100,", "size\", default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\", help=\"cell types to animate\", required=True) parser.add_argument(\"-c\",", "bnd_colors=bnd) # start animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static)", "or (len(tau_alpha) is not len(tau)): tau_alpha = [1 for t in tau] if", "if steps is None: steps = self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0], tau,", "bbox: 
actors.append(self._get_box_actor()) if bnd is not None: for tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) #", "grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] elif len(args.colors) == 1: args.colors", "Set up vtk renderer \"\"\" self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow =", "Animate simulation results :param tau: list of cell types :param tau_colors: list with", "not in static_tau] update_colors = [tau_colors[i] for i, t in enumerate(tau) if t", "action=\"store_true\", help=\"read all data at once before the visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread", "\"\"\" # set default colors and opacity when they are not specified if", "tau_alpha: list with opacity per cell type :param tau_colors: list with color per", "in {}\".format(args.simdir)) # set colors and opacity if not args.colors: print \"Cell color", "for a cell type \"\"\" if isinstance(color, basestring): # convert color to rgb", "and return actor for wire frame box of the simulation domain \"\"\" (w,", "box of the simulation domain \"\"\" (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() imageData =", "cell type :param tau_alpha: list with opacity per cell type :param steps: steps", "v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static) # create and store", "self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5, 0.5), opacity=1):", "tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z' in tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) 
points.InsertNextPoint(0,h,f*d)", "if steps is not None: self.files = {get_num(f) : f for f in", "in tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y' in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d)", "or (len(tau_colors) is not len(tau)): tau_colors = [(.5, .5, .5) for t in", "for c in args.colors] if not args.alpha: print \"Alpha values not specified -", "onthefly: self.data = {n : self._load_data(f) for n,f in self.files.iteritems()} else: self.data =", "is None: tau_alpha = [1 for tau in tau_list] # get actors stepdata", "w2i.Update() writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is not None and imprefix.endswith('_'): imprefix", "once before the visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk file every time it", "of cell types - default to opaque objects\" args.alpha = [1 for t", "skip file\".format(fn) return None if data.GetPointData().HasArray('cell.type') != 1: print \"'cell.id' array missing from", "= self.renderer.GetActiveCamera(); cam = vtk.vtkCamera() if 'position' in self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if", "save=False, impath=None, imprefix=None, bbox=True, tau_alpha=None, tau_colors=None, bnd=None): \"\"\" Visualize a given step. 
:param", "not in sigma: continue pix = np.column_stack(np.where(sigma == s)) for p in pix:", "timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId = timerId # start the interaction and", "= self._load_data(self.files[step]) return self.data[step] else: return self._load_data(self.files[step]) def _set_renderer(self, winsize, bg): \"\"\" Set", "not args.alpha: print \"Alpha values not specified - default to opaque objects\" args.alpha", "[1 for t in tau] if steps is None: steps = self.files.keys() steps.sort()", "= self.timer_count % self.tmax self.save = False # get new actors actors =", "in enumerate(tau) if t in update_tau] update_func = lambda t, s: self.visualize(steps[t], update_tau,", "parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100, 100, 50), help=\"camera focal point\") # parser.add_argument(\"--campitch\", type=float, default=,", "= vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper", "bg): \"\"\" Set up vtk renderer \"\"\" self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2])", "# read data get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk', '')) if steps is not", "glyph.ScalingOff() glyph.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2])", "steps = self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0], tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha,", "for a list of cell types and add them to the renderer :param", "actors stepdata = self._get_step(step) if stepdata is None: return [] else: actors =", "parser.add_argument(\"--color_zmin\",type=float, nargs=3) 
parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args() def main(): args", "import glob import sys import argparse import numpy as np import vtk from", "= args.outdir if args.moviename is None: args.moviename = args.imprefix makeMovie(args.imprefix, 'png', args.moviename, args.outdir,", "tau_list: list of cell types :param tau_colors: list with color per cell type", "if not args.colors: print \"Cell color not specified - default to grey\" args.colors", "with color {}'.format(tp,color) (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints() f =", "None) or (len(tau_colors) is not len(tau)): tau_colors = [(.5, .5, .5) for t", "if args.moviedir is None: args.moviedir = args.outdir if args.moviename is None: args.moviename =", "None: bnd['-z'] = args.color_zmin if len(bnd) == 0: bnd = {} # set", "bg[2]) self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self,", ":param bbox_color: bounding box wire frame color :param cam_props: dictionary with camera settings", "colors.cnames: color = get_color(color) else: color = (0.5, 0.5, 0.5) dim = stepdata.GetDimensions()", "= obj win = iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer() # remove all actors that", "bbox: show bounding box :returns: list of actors with first the actors for", "= self._get_step(step) if stepdata is None: return [] else: actors = [self._get_actor_for_tau(stepdata, tau,", "found in {}\".format(args.simdir)) # set colors and opacity if not args.colors: print \"Cell", "box (if applicable) \"\"\" # set default colors and opacity when they are", "tau.reshape(dim, order='F') show_idx = 
np.unique(sigma[tau == show_tau]) points = vtk.vtkPoints() for s in", "else: self.data = {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} # setup renderer self._set_renderer(winsize, bg) def _get_step(self,step):", "in args.celltypes] elif len(args.alpha) < len(args.celltypes): print \"Number of alpha values does not", "it is used instead of \" \"keeping it in memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make", "t in args.celltypes] else: args.colors = [get_color(c) for c in args.colors] if not", "self.update = update_func self.tmax = tmax self.update_actors = None self.save = save def", "self.cam_props: cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color):", "is None: args.moviedir = args.outdir if args.moviename is None: args.moviename = args.imprefix makeMovie(args.imprefix,", "self.tmax self.save = False # get new actors actors = self.update(t, self.save) self.update_actors", "polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) # Add the polygon to", "len(args.celltypes): print \"Number of alpha values does not match number of cell types", "as keys and settings as values \"\"\" old_cam = self.renderer.GetActiveCamera(); cam = vtk.vtkCamera()", "parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args() def main(): args = parse_args()", "movie windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float,", "steps 
is not None: self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir))", "t in args.celltypes] bnd = {} if args.color_xmax is not None: bnd['x'] =", "\" \"keeping it in memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\",", "[actors[tau.index(t)] for t in tau if t not in static_tau] else: cb.update_actors =", "help=\"camera position\") parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100, 100, 50), help=\"camera focal point\") # parser.add_argument(\"--campitch\",", "d) imageData.SetOrigin(0, 0, 0) mapper = vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData) else:", "args.outdir = args.simdir if not os.path.isdir(args.outdir): print \"Create output directory {}\".format(args.outdir) os.makedirs(args.outdir) if", "if 'focal point' in self.cam_props: cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in self.cam_props:", "self.files.iteritems()} else: self.data = {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} # setup renderer self._set_renderer(winsize, bg) def", "winsize, bg): \"\"\" Set up vtk renderer \"\"\" self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1],", "get actors stepdata = self._get_step(step) if stepdata is None: return [] else: actors", "d) = self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints() f = 0 if '-' in tp", "second\") parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\",", "read data on the fly instead of all at once \"\"\" def __init__(self,", "given step. 
:param step: step to visualize :param tau_list: list of cell types", "not onthefly: self.data = {n : self._load_data(f) for n,f in self.files.iteritems()} else: self.data", "args.celltypes] elif len(args.alpha) < len(args.celltypes): print \"Number of alpha values does not match", "self.update_actors: ren.RemoveActor(actor) # set t to correct value t = self.timer_count if self.timer_count", "t in args.celltypes] elif len(args.colors) == 1: args.colors = [get_color(args.colors[0]) for t in", "not None: bnd['x'] = args.color_xmax if args.color_ymax is not None: bnd['y'] = args.color_ymax", "vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self, step, tau_list, tau_colors=None,", "imprefix=None, bbox=True, tau_alpha=None, tau_colors=None, bnd=None): \"\"\" Visualize a given step. :param step: step", "visualize :param tau_list: list of cell types :param show: initialize and start the", "prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make movie after closing", "Available options: - position - focal point - pitch If position and focal", "new actors actors = self.update(t, self.save) self.update_actors = actors self.timer_count += 1 class", "cubic lattice Built specifically to visualize the VTK files created by Morpheus \"\"\"", "'.' writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print 'save image {}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write() return actors", "help=\"save images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make movie after closing the visualization window\") parser.add_argument(\"--moviedir\",", "they will be taken from the camera in the renderer. 
:param renderer: vtk", "= colors.ColorConverter() if name in colors.cnames: return cc.to_rgb(name) else: return cc.to_rgb(\"grey\") def parse_args():", "= vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow)", "\"is not specified\") parser.add_argument(\"-f\", \"--fps\", type=float, default=5, help=\"frames per second\") parser.add_argument(\"-o\", \"--outdir\", type=str,", "s in show_idx: if s not in sigma: continue pix = np.column_stack(np.where(sigma ==", "\"\"\" (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData() imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w,", "the fly instead of all at once \"\"\" def __init__(self, simdir, steps=None, winsize=(800,", "cb.timerId = timerId # start the interaction and timer self.renderWindowInteractor.Start() def get_color(name): \"\"\"", "is None: static_tau = [] update_tau = [t for t in tau if", "the VTK files created by Morpheus \"\"\" import os import glob import sys", "def get_color(name): \"\"\" Get color for matplotlib color name \"\"\" cc = colors.ColorConverter()", "tau: list of cell types :param tau_colors: list with color per cell type", "string if color in colors.cnames: color = get_color(color) else: color = (0.5, 0.5,", "os.path.isdir(args.simdir): sys.exit(\"Could not find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No vtk files found", "= \"MIT\" __version__ = \"0.1\" __maintainer__ = \"<NAME>\" # the vtkTimerCallback takes care", "vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5,", 
"cb.execute) timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId = timerId # start the interaction", "position - focal point - pitch If position and focal point are not", "color to rgb string if color in colors.cnames: color = get_color(color) else: color", "self.tmax: t = self.timer_count % self.tmax self.save = False # get new actors", "winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem), bnd_colors=bnd) # start animation v.animate(args.celltypes,", "200), help=\"camera position\") parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100, 100, 50), help=\"camera focal point\") #", "add actors to the renderer for actor in actors: self.renderer.AddActor(actor) return actors def", "a given step. :param step: step to visualize :param tau_list: list of cell", "reader.Update() data = reader.GetOutput() if data.GetPointData().HasArray('cell.id') != 1: print \"'cell.id' array missing from", "self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self, step,", "= vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) #", "self.cam_props is not None: self._modify_cam() if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save: w2i =", "if imprefix is not None and imprefix.endswith('_'): imprefix = imprefix + '_' if", "tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is None: static_tau = []", "= self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints() f = 0 if '-' in 
tp else", "cb = vtkTimerCallback(update_func, len(steps), save) if len(actors) > 0: cb.update_actors = [actors[tau.index(t)] for", "os import glob import sys import argparse import numpy as np import vtk", "not len(tau)): tau_alpha = [1 for t in tau] if steps is None:", "polygon.GetPointIds().SetNumberOfIds(4) # make a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3)", "False __author__ = \"<NAME>\" __copyright__ = \"Copyright 2016\" __credits__ = \"<NAME>\" __license__ =", "bbox_color=(1, 1, 1), cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color = bbox_color self.cam_props = cam_props", "0: cb.update_actors = [actors[tau.index(t)] for t in tau if t not in static_tau]", "polygonPolyData.SetPolys(polygons) # Create a mapper and actor mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <=", "for matplotlib color name \"\"\" cc = colors.ColorConverter() if name in colors.cnames: return", "is None: steps = self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0], tau, show=False, save=False,", ".5) polydata = vtk.vtkPolyData() polydata.SetPoints(points) sources = vtk.vtkCubeSource() sources.Update() glyph = vtk.vtkGlyph3D() if", "cell types (will NOT be updated during animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1, 1,", "to opaque objects\" args.alpha = [1 for t in args.celltypes] elif len(args.alpha) ==", "= reader.GetOutput() if data.GetPointData().HasArray('cell.id') != 1: print \"'cell.id' array missing from {} ->", "show bounding box :returns: list of actors with first the actors for tau_list", "\"--simdir\", type=str, default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2, help=\"window size\", default=(800, 800))", "= stepdata.GetDimensions() sigma = 
VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim, order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau", "steps=None, winsize=(800, 800), bg=(0, 0, 0), bbox_color=(1, 1, 1), cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None):", "domain \"\"\" (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData() imageData.SetDimensions(2, 2, 2)", "data.GetPointData().HasArray('cell.type') != 1: print \"'cell.id' array missing from {} -> skip file\".format(fn) return", "step, tau_list, tau_colors=None, tau_alpha=None, bbox=True, bnd=None): \"\"\" Create actors for a list of", "array missing from {} -> skip file\".format(fn) return None return reader.GetOutput() def visualize(self,", "in self.files.iteritems()} else: self.data = {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} # setup renderer self._set_renderer(winsize, bg)", "None: steps = self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0], tau, show=False, save=False, bbox=True,", "static_tau = [] update_tau = [t for t in tau if t not", "if len(actors) > 0: cb.update_actors = [actors[tau.index(t)] for t in tau if t", "None: bnd['-x'] = args.color_xmin if args.color_ymin is not None: bnd['-y'] = args.color_ymin if", "\"\"\" def __init__(self, simdir, steps=None, winsize=(800, 800), bg=(0, 0, 0), bbox_color=(1, 1, 1),", "is not None: bnd['-x'] = args.color_xmin if args.color_ymin is not None: bnd['-y'] =", "match number of cell types - default to grey\" args.colors = [get_color(\"grey\") for", "if t not in static_tau] update_colors = [tau_colors[i] for i, t in enumerate(tau)", "the visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk file every time it is used", "by Morpheus \"\"\" import os import glob import sys import argparse import numpy", "t, s: self.visualize(steps[t], 
update_tau, show=False, save=s, bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath) cb =", "color=(0.5, 0.5, 0.5), opacity=1): \"\"\" Create actor for a cell type \"\"\" if", "the actors to the renderer, should not be used for animations :param save:", "\"\"\" Load vtk files \"\"\" reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data =", "self.update(t, self.save) self.update_actors = actors self.timer_count += 1 class Visualizer3D(): \"\"\" Create visualizer", "points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z' in tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon =", "tau in tau_list] if tau_alpha is None: tau_alpha = [1 for tau in", "type=str, default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2, help=\"window size\", default=(800, 800)) parser.add_argument(\"-t\",", "None if data.GetPointData().HasArray('cell.type') != 1: print \"'cell.id' array missing from {} -> skip", "sources.Update() glyph = vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff()", "type=int, nargs=\"*\", help=\"steps to animate, all steps will be shown if this \"", "not None: bnd['z'] = args.color_zmax if args.color_xmin is not None: bnd['-x'] = args.color_xmin", "a PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a mapper and actor", "= vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is not None and imprefix.endswith('_'): imprefix = imprefix", "with options (see above) as keys and settings as values \"\"\" old_cam =", "nargs=\"*\", help=\"colors or the cell types\") 
parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\", help=\"opacity of the", "help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make movie after", "as np import vtk from vtk.util import numpy_support as VN from matplotlib import", "h, d) = self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints() f = 0 if '-' in", "step to visualize :param tau_list: list of cell types :param tau_colors: list with", "isinstance(color, basestring): # convert color to rgb string if color in colors.cnames: color", "in the renderer. :param renderer: vtk renderer :param cam_props: dictionary with options (see", "tau_list] if tau_alpha is None: tau_alpha = [1 for tau in tau_list] #", "data at once before the visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk file every", "specified\") parser.add_argument(\"-f\", \"--fps\", type=float, default=5, help=\"frames per second\") parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output directory\")", "nargs=3) parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args() def main(): args =", "800), bg=(0, 0, 0), bbox_color=(1, 1, 1), cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color =", "second :param static_tau: static cell types that should not be updated during the", "if color in colors.cnames: color = get_color(color) else: color = (0.5, 0.5, 0.5)", "sys.exit(\"Could not find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No vtk files found in", "is not None: self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if", "a list of cell types and add them to the renderer :param step:", 
"default colors and opacity when they are not specified if tau_colors is None:", "0.5) dim = stepdata.GetDimensions() sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim, order='F') tau =", "mapper and actor mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData)", "is None) or (len(tau_colors) is not len(tau)): tau_colors = [(.5, .5, .5) for", "args.savemem), bnd_colors=bnd) # start animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps,", "bg) def _get_step(self,step): \"\"\" Retrieve vtk data for a specific step \"\"\" if", "during animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1, 1, 1), help=\"bounding box color\") parser.add_argument(\"--bgcolor\", type=float,", "to animate, all steps will be shown if this \" \"is not specified\")", "vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is not None and", "self.cam_props = cam_props self.storeafterread = storeafterread self.bnd_colors = bnd_colors # read data get_num", "vtk.vtkCamera() if 'position' in self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if 'focal point' in self.cam_props:", "= 0 if '-' in tp else 1 if 'x' in tp: points.InsertNextPoint(f*w,0,0)", "__license__ = \"MIT\" __version__ = \"0.1\" __maintainer__ = \"<NAME>\" # the vtkTimerCallback takes", "- focal point - pitch If position and focal point are not given,", "0.5) for tau in tau_list] if tau_alpha is None: tau_alpha = [1 for", "animate, all steps will be shown if this \" \"is not specified\") parser.add_argument(\"-f\",", "args.colors] if not args.alpha: print \"Alpha values not specified - default to 
opaque", "the animation \"\"\" if (tau_colors is None) or (len(tau_colors) is not len(tau)): tau_colors", "False # get new actors actors = self.update(t, self.save) self.update_actors = actors self.timer_count", "return cc.to_rgb(\"grey\") def parse_args(): parser = argparse.ArgumentParser() # parser.description(\"Animate 3D Morpheus simulations\") parser.add_argument(\"-i\",", "animation \"\"\" if (tau_colors is None) or (len(tau_colors) is not len(tau)): tau_colors =", "args.alpha: print \"Alpha values not specified - default to opaque objects\" args.alpha =", "t not in static_tau] else: cb.update_actors = [] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000", "if vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper = vtk.vtkPolyDataMapper()", "for the renderer. Available options: - position - focal point - pitch If", "save=s, bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath) cb = vtkTimerCallback(update_func, len(steps), save) if len(actors)", "step)) print 'save image {}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write() return actors def animate(self, tau,", "None: bnd['x'] = args.color_xmax if args.color_ymax is not None: bnd['y'] = args.color_ymax if", "from {} -> skip file\".format(fn) return None if data.GetPointData().HasArray('cell.type') != 1: print \"'cell.id'", "d) = self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData() imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w, h, d) imageData.SetOrigin(0,", "# setup renderer self._set_renderer(winsize, bg) def _get_step(self,step): \"\"\" Retrieve vtk data for a", "'position' in self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if 'focal point' in self.cam_props: 
cam.SetFocalPoint(self.cam_props['focal point'])", "sigma: continue pix = np.column_stack(np.where(sigma == s)) for p in pix: points.InsertNextPoint(p[0] -", "actor def _load_data(self, fn): \"\"\" Load vtk files \"\"\" reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn)", "specified) :param save: save view to png :param impath: path to store image", "self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints() f = 0 if '-' in tp else 1", "default=(100, 100, 50), help=\"camera focal point\") # parser.add_argument(\"--campitch\", type=float, default=, help=\"camera pitch\") parser.add_argument(\"--steps\",", "[args.alpha for t in args.celltypes] elif len(args.alpha) < len(args.celltypes): print \"Number of alpha", "return [] else: actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for i, tau in", "show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save: w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer = vtk.vtkPNGWriter()", "/ float(fps))) cb.timerId = timerId # start the interaction and timer self.renderWindowInteractor.Start() def", "\"\"\" Create actor for a cell type \"\"\" if isinstance(color, basestring): # convert", "else: cam.SetPosition(old_cam.GetPosition()) if 'focal point' in self.cam_props: cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch'", "Morpheus \"\"\" import os import glob import sys import argparse import numpy as", "= {n : self._load_data(f) for n,f in self.files.iteritems()} else: self.data = {self.files.keys()[0] :", "self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save: w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort())", "in self.data: return self.data[step] else: if self.storeafterread: self.data[step] = 
self._load_data(self.files[step]) return self.data[step] else:", "0, 0), help=\"background color\") parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200, 200, 200), help=\"camera position\") parser.add_argument(\"--camfocus\",", "bounding box :returns: list of actors with first the actors for tau_list followed", "when they are not specified if tau_colors is None: tau_colors = [(0.5, 0.5,", ":param tau_list: list of cell types :param tau_colors: list with color per cell", "[tau_colors[i] for i, t in enumerate(tau) if t in update_tau] update_alpha = [tau_alpha[i]", "renderer. :param renderer: vtk renderer :param cam_props: dictionary with options (see above) as", "movie after closing the visualization window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie", "self.tmax = tmax self.update_actors = None self.save = save def execute(self, obj, event):", "% self.tmax self.save = False # get new actors actors = self.update(t, self.save)", "impath=None, imprefix=None, fps=5, static_tau=None): \"\"\" Animate simulation results :param tau: list of cell", "Get color for matplotlib color name \"\"\" cc = colors.ColorConverter() if name in", "window size :param bg: background color :param bbox_color: bounding box wire frame color", "in memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4 movie\")", "parser.description(\"Animate 3D Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\", type=int,", "self._get_step(step) if stepdata is None: return [] else: actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i],", "nargs=\"*\", help=\"cell types to animate\", required=True) parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\", 
help=\"colors or the", "tau, tau_colors=None, tau_alpha=None, steps=None, save=False, impath=None, imprefix=None, fps=5, static_tau=None): \"\"\" Animate simulation results", "nargs=3, default=(100, 100, 50), help=\"camera focal point\") # parser.add_argument(\"--campitch\", type=float, default=, help=\"camera pitch\")", "of cell types :param tau_colors: list with color per cell type :param tau_alpha:", "of actors with first the actors for tau_list followed by the bounding box", "for t in args.celltypes] elif len(args.colors) == 1: args.colors = [get_color(args.colors[0]) for t", "self._load_data(self.files[self.files.keys()[0]])} # setup renderer self._set_renderer(winsize, bg) def _get_step(self,step): \"\"\" Retrieve vtk data for", "save: w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is", "is not len(tau)): tau_colors = [(.5, .5, .5) for t in tau] if", "tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add actors to the renderer for actor in", "'')) if steps is not None: self.files = {get_num(f) : f for f", "save view to png :param impath: path to store image :param imprefix: image", "polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2,", "bnd=None): \"\"\" Visualize a given step. 
:param step: step to visualize :param tau_list:", "0, 0) mapper = vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor", "cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print 'add boundary for {} with color {}'.format(tp,color) (w,", "be shown if this \" \"is not specified\") parser.add_argument(\"-f\", \"--fps\", type=float, default=5, help=\"frames", "nargs=\"*\", help=\"steps to animate, all steps will be shown if this \" \"is", "takes care of updating the visualzation class vtkTimerCallback(): def __init__(self, update_func, tmax=1, save=False):", "save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is None: static_tau = [] update_tau =", "bg=(0, 0, 0), bbox_color=(1, 1, 1), cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color = bbox_color", "2) imageData.SetSpacing(w, h, d) imageData.SetOrigin(0, 0, 0) mapper = vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <=", "color :param bbox_color: bounding box wire frame color :param cam_props: dictionary with camera", "Modify the camera settings for the renderer. 
Available options: - position - focal", "3D Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2,", "frame if bbox: actors.append(self._get_box_actor()) if bnd is not None: for tp,color in bnd.iteritems():", "tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is None: static_tau = [] update_tau = [t for", "help=\"read all data at once before the visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk", "not specified - default to opaque objects\" args.alpha = [1 for t in", "[] update_tau = [t for t in tau if t not in static_tau]", "args.colors = [get_color(args.colors[0]) for t in args.celltypes] elif len(args.colors) < len(args.celltypes): print \"Number", "alpha values does not match number of cell types - default to opaque", "is not None: bnd['-y'] = args.color_ymin if args.color_zmin is not None: bnd['-z'] =", "static_tau: static cell types that should not be updated during the animation \"\"\"", "if args.color_ymin is not None: bnd['-y'] = args.color_ymin if args.color_zmin is not None:", "1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) # Add the polygon to a list of", "VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim, order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim, order='F') show_idx", "renderer :param cam_props: dictionary with options (see above) as keys and settings as", "100, 50), help=\"camera focal point\") # parser.add_argument(\"--campitch\", type=float, default=, help=\"camera pitch\") parser.add_argument(\"--steps\", type=int,", "self._modify_cam() if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save: w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer", ":returns: 
list of actors with first the actors for tau_list followed by the", "set colors and opacity if not args.colors: print \"Cell color not specified -", "found_im2movie = False __author__ = \"<NAME>\" __copyright__ = \"Copyright 2016\" __credits__ = \"<NAME>\"", "= True if args.saveim: if not args.outdir: args.outdir = args.simdir if not os.path.isdir(args.outdir):", "= vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2)", "p in pix: points.InsertNextPoint(p[0] - .5, p[1] - .5, p[2] - .5) polydata", ":param steps: steps to visualize :param winsize: window size :param bg: background color", "= self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render() if self.cam_props is not None: self._modify_cam()", "= 0 self.update = update_func self.tmax = tmax self.update_actors = None self.save =", "all actors that will be updated for actor in self.update_actors: ren.RemoveActor(actor) # set", "files found in {}\".format(args.simdir)) # set colors and opacity if not args.colors: print", "args.colors = [get_color(\"grey\") for t in args.celltypes] elif len(args.colors) == 1: args.colors =", "self.save = False # get new actors actors = self.update(t, self.save) self.update_actors =", "parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3)", "0: sys.exit(\"No vtk files found in {}\".format(args.simdir)) # set colors and opacity if", "renderer for actor in actors: self.renderer.AddActor(actor) return actors def _modify_cam(self): \"\"\" Modify the", "image {}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write() return actors def animate(self, tau, tau_colors=None, tau_alpha=None, steps=None,", "elif len(args.alpha) < len(args.celltypes): print 
\"Number of alpha values does not match number", "if args.movie and found_im2movie: if args.moviedir is None: args.moviedir = args.outdir if args.moviename", "tomp4=args.mp4) elif not found_im2movie: print \"WARNING: Movie generation is turned of because im2movie", "'' if impath is None: impath = '.' writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print 'save", "grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] else: args.colors = [get_color(c) for", "__credits__ = \"<NAME>\" __license__ = \"MIT\" __version__ = \"0.1\" __maintainer__ = \"<NAME>\" #", "for t in args.celltypes] elif len(args.alpha) < len(args.celltypes): print \"Number of alpha values", "steps: steps (all steps are shown when not specified) :param save: save view", "if name in colors.cnames: return cc.to_rgb(name) else: return cc.to_rgb(\"grey\") def parse_args(): parser =", "list with opacity per cell type :param tau_colors: list with color per cell", "of the simulation domain \"\"\" (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData()", "< len(args.celltypes): print \"Number of colors does not match number of cell types", "polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) # Add the polygon to a list", "type=int, nargs=2, help=\"window size\", default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\", help=\"cell types to", "is not None: bnd['-z'] = args.color_zmin if len(bnd) == 0: bnd = {}", "the camera in the renderer. 
:param renderer: vtk renderer :param cam_props: dictionary with", "return actor for wire frame box of the simulation domain \"\"\" (w, h,", "\"'cell.id' array missing from {} -> skip file\".format(fn) return None return reader.GetOutput() def", "cell types - default to opaque objects\" args.alpha = [1 for t in", "2) polygon.GetPointIds().SetId(3, 3) # Add the polygon to a list of polygons polygons", "type=int, nargs=\"*\", help=\"static cell types (will NOT be updated during animation)\") parser.add_argument(\"--bboxcolor\", type=float,", "if args.color_xmax is not None: bnd['x'] = args.color_xmax if args.color_ymax is not None:", "the camera settings for the renderer. Available options: - position - focal point", "storeafterread=True, bnd_colors=None): self.bbox_color = bbox_color self.cam_props = cam_props self.storeafterread = storeafterread self.bnd_colors =", "mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor", "animate if not os.path.isdir(args.simdir): sys.exit(\"Could not find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No", "file\".format(fn) return None if data.GetPointData().HasArray('cell.type') != 1: print \"'cell.id' array missing from {}", "len(tau)): tau_alpha = [1 for t in tau] if steps is None: steps", "options: - position - focal point - pitch If position and focal point", "step in self.data: return self.data[step] else: if self.storeafterread: self.data[step] = self._load_data(self.files[step]) return self.data[step]", "cam.SetPosition(old_cam.GetPosition()) if 'focal point' in self.cam_props: cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in", "color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0, 
0, 0), help=\"background color\") parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200,", "type :param tau_colors: list with color per cell type \"\"\" self.renderWindow.SetWindowName('step ' +", "in static_tau] else: cb.update_actors = [] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps)))", "color[1], color[2]) actor.SetMapper(mapper) return actor def _load_data(self, fn): \"\"\" Load vtk files \"\"\"", "import colors found_im2movie = True try: from im2movie import makeMovie except ImportError: found_im2movie", "0 if '-' in tp else 1 if 'x' in tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0)", "vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort())", "if static_tau is None: static_tau = [] update_tau = [t for t in", "\"Copyright 2016\" __credits__ = \"<NAME>\" __license__ = \"MIT\" __version__ = \"0.1\" __maintainer__ =", "im2movie import makeMovie except ImportError: found_im2movie = False __author__ = \"<NAME>\" __copyright__ =", "polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) # Add the polygon to a list of polygons", ".5, .5) for t in tau] if (tau_alpha is None) or (len(tau_alpha) is", "save) if len(actors) > 0: cb.update_actors = [actors[tau.index(t)] for t in tau if", "np.unique(sigma[tau == show_tau]) points = vtk.vtkPoints() for s in show_idx: if s not", "simdir: path to folder containing vtk files :param steps: steps to visualize :param", "fps=5, static_tau=None): \"\"\" Animate simulation results :param tau: list of cell types :param", "pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps to animate, all steps will be shown if", "in tau if t not in static_tau] else: 
cb.update_actors = [] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute)", "all at once \"\"\" def __init__(self, simdir, steps=None, winsize=(800, 800), bg=(0, 0, 0),", "visualize :param winsize: window size :param bg: background color :param bbox_color: bounding box", "not be used for animations :param save: save view to png :param impath:", "self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0], tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is", "_get_bnd_actor(self,tp,color): print 'add boundary for {} with color {}'.format(tp,color) (w, h, d) =", "visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk file every time it is used instead", "not find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No vtk files found in {}\".format(args.simdir))", "!= 1: print \"'cell.id' array missing from {} -> skip file\".format(fn) return None", "self._load_data(f) for n,f in self.files.iteritems()} else: self.data = {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} # setup", "stepdata.GetDimensions() sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim, order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau =", "glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0],", "1), help=\"bounding box color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0, 0, 0), help=\"background color\") parser.add_argument(\"--camposition\",", "list with color per cell type :param tau_alpha: list with opacity per cell", "dim = 
stepdata.GetDimensions() sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim, order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type'))", "<reponame>margrietpalm/VisGrid3D-python #!/usr/bin/env python \"\"\"Visualizes data on a cubic lattice Built specifically to visualize", "not None: self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f)", "args.outdir: args.outdir = args.simdir if not os.path.isdir(args.outdir): print \"Create output directory {}\".format(args.outdir) os.makedirs(args.outdir)", "updating the visualzation class vtkTimerCallback(): def __init__(self, update_func, tmax=1, save=False): self.timer_count = 0", "background color :param bbox_color: bounding box wire frame color :param cam_props: dictionary with", "None: tau_colors = [(0.5, 0.5, 0.5) for tau in tau_list] if tau_alpha is", "self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self, step, tau_list, tau_colors=None, tau_alpha=None, bbox=True, bnd=None): \"\"\" Create actors", "parser.add_argument(\"--campitch\", type=float, default=, help=\"camera pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps to animate, all steps", "options (see above) as keys and settings as values \"\"\" old_cam = self.renderer.GetActiveCamera();", "= vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data = reader.GetOutput() if data.GetPointData().HasArray('cell.id') != 1: print", "Create a mapper and actor mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData)", "v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem), bnd_colors=bnd) #", "2, 2) imageData.SetSpacing(w, h, d) imageData.SetOrigin(0, 0, 0) mapper = vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION", 
"actors = self.visualize(steps[0], tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is None:", "parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3) parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args() def", "visualize :param tau_list: list of cell types :param tau_colors: list with color per", "tau_alpha = [1 for t in tau] if steps is None: steps =", "\"--saveim\", action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make movie after closing the visualization", "wire frame box of the simulation domain \"\"\" (w, h, d) = self.data[self.data.keys()[0]].GetDimensions()", "cell type \"\"\" if isinstance(color, basestring): # convert color to rgb string if", "default to grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] else: args.colors =", "type=str, help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make movie", "return actors def _modify_cam(self): \"\"\" Modify the camera settings for the renderer. 
Available", "tau_alpha: list with opacity per cell type :param steps: steps (all steps are", "# get actors stepdata = self._get_step(step) if stepdata is None: return [] else:", "there is something to animate if not os.path.isdir(args.simdir): sys.exit(\"Could not find {}\".format(args.simdir)) elif", "starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk file every time it is used instead of", "p[2] - .5) polydata = vtk.vtkPolyData() polydata.SetPoints(points) sources = vtk.vtkCubeSource() sources.Update() glyph =", "convert color to rgb string if color in colors.cnames: color = get_color(color) else:", "if t not in static_tau] else: cb.update_actors = [] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId =", "None: self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in", "the polygon to a list of polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create", "return None return reader.GetOutput() def visualize(self, step, tau_list, show=False, save=False, impath=None, imprefix=None, bbox=True,", "sigma = sigma.reshape(dim, order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim, order='F') show_idx =", "help=\"window size\", default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\", help=\"cell types to animate\", required=True)", "actor.GetProperty().SetRepresentationToWireframe() return actor def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5, 0.5), opacity=1): \"\"\" Create", "image prefix :param bbox: show bounding box :param tau_alpha: list with opacity per", "t in tau] if (tau_alpha is None) or (len(tau_alpha) is not len(tau)): tau_alpha", "and opacity when they are not specified if tau_colors is None: tau_colors =", "order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim, order='F') show_idx = 
np.unique(sigma[tau == show_tau])", "else: cb.update_actors = [] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId =", "that will be updated for actor in self.update_actors: ren.RemoveActor(actor) # set t to", "else: color = (0.5, 0.5, 0.5) dim = stepdata.GetDimensions() sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma", "else: self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly:", "tau_alpha=None, bbox=True, bnd=None): \"\"\" Create actors for a list of cell types and", "render window after adding the actors to the renderer, should not be used", "vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data = reader.GetOutput() if data.GetPointData().HasArray('cell.id') != 1: print \"'cell.id'", "None: bnd['y'] = args.color_ymax if args.color_zmax is not None: bnd['z'] = args.color_zmax if", "(will NOT be updated during animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1, 1, 1), help=\"bounding", "type=float, nargs=\"*\", help=\"opacity of the cell types\") parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static cell types", "parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\",", "int(fn.split('_')[-1].replace('.vtk', '')) if steps is not None: self.files = {get_num(f) : f for", "1, 1), help=\"bounding box color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0, 0, 0), help=\"background color\")", "set t to correct value t = self.timer_count if self.timer_count >= self.tmax: t", "import sys import argparse import numpy as np import vtk from vtk.util import", "Visualize a given step. 
:param step: step to visualize :param tau_list: list of", "# Add the polygon to a list of polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon)", "numpy_support as VN from matplotlib import colors found_im2movie = True try: from im2movie", "def __init__(self, simdir, steps=None, winsize=(800, 800), bg=(0, 0, 0), bbox_color=(1, 1, 1), cam_props=None,", "movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3) parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3)", "is not None: bnd['z'] = args.color_zmax if args.color_xmin is not None: bnd['-x'] =", "vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper =", "default to opaque objects\" args.alpha = [1 for t in args.celltypes] bnd =", "in args.celltypes] elif len(args.colors) < len(args.celltypes): print \"Number of colors does not match", "self.renderWindow.SetWindowName('step ' + str(int(step))) actors = self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render() if", "start the render window after adding the actors to the renderer, should not", "help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3) parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3)", "bnd['-z'] = args.color_zmin if len(bnd) == 0: bnd = {} # set saving", "os.makedirs(args.outdir) if not args.imprefix: args.imprefix = \"frame\" # set camera cam_props = {'position':", "- default to opaque objects\" 
args.alpha = [1 for t in args.celltypes] elif", "type=float, nargs=3, default=(-200, 200, 200), help=\"camera position\") parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100, 100, 50),", "tau] if steps is None: steps = self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors = self.visualize(steps[0],", "# create and store movie if args.movie and found_im2movie: if args.moviedir is None:", "for {} with color {}'.format(tp,color) (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints()", "static_tau=None): \"\"\" Animate simulation results :param tau: list of cell types :param tau_colors:", "# check if there is something to animate if not os.path.isdir(args.simdir): sys.exit(\"Could not", "parser.add_argument(\"--moviename\", type=str, help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all data at once before the", "and settings as values \"\"\" old_cam = self.renderer.GetActiveCamera(); cam = vtk.vtkCamera() if 'position'", "Load vtk files \"\"\" reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data = reader.GetOutput()", "in colors.cnames: color = get_color(color) else: color = (0.5, 0.5, 0.5) dim =", "window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all", "PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a mapper and actor mapper", "= vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is not None", "of cell types :param show: initialize and start the render window after adding", "fly instead of all at once \"\"\" def __init__(self, simdir, steps=None, 
winsize=(800, 800),", "== s)) for p in pix: points.InsertNextPoint(p[0] - .5, p[1] - .5, p[2]", "args.colors = [get_color(c) for c in args.colors] if not args.alpha: print \"Alpha values", "if args.moviename is None: args.moviename = args.imprefix makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir, args.fps,", "f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps} else: self.files = {get_num(f) : f", "self.visualize(steps[t], update_tau, show=False, save=s, bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath) cb = vtkTimerCallback(update_func, len(steps),", "to grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] else: args.colors = [get_color(c)", "static_tau=args.static) # create and store movie if args.movie and found_im2movie: if args.moviedir is", "if isinstance(color, basestring): # convert color to rgb string if color in colors.cnames:", "matplotlib import colors found_im2movie = True try: from im2movie import makeMovie except ImportError:", "of \" \"keeping it in memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie windows compatible\") parser.add_argument(\"--mp4\",", "to store image :param imprefix: image prefix :param fps: frames per second :param", "i, t in enumerate(tau) if t in update_tau] update_alpha = [tau_alpha[i] for i,", "args.color_zmin is not None: bnd['-z'] = args.color_zmin if len(bnd) == 0: bnd =", "args.simdir if not os.path.isdir(args.outdir): print \"Create output directory {}\".format(args.outdir) os.makedirs(args.outdir) if not args.imprefix:", "compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3) parser.add_argument(\"--color_xmax\",type=float,", "n,f in self.files.iteritems()} else: self.data = 
{self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} # setup renderer self._set_renderer(winsize,", "image prefix :param fps: frames per second :param static_tau: static cell types that", "to visualize :param winsize: window size :param bg: background color :param bbox_color: bounding", "argparse.ArgumentParser() # parser.description(\"Animate 3D Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\",", "rgb string if color in colors.cnames: color = get_color(color) else: color = (0.5,", "types and add them to the renderer :param step: step to visualize :param", "= vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self, step, tau_list, tau_colors=None, tau_alpha=None, bbox=True, bnd=None):", "in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print 'add boundary for {} with color", "actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for i, tau in enumerate(tau_list)] # get", ":param simdir: path to folder containing vtk files :param steps: steps to visualize", "colors does not match number of cell types - default to grey\" args.colors", "does not match number of cell types - default to opaque objects\" args.alpha", "renderer, should not be used for animations :param save: save view to png", "and timer self.renderWindowInteractor.Start() def get_color(name): \"\"\" Get color for matplotlib color name \"\"\"", "= vtk.vtkPoints() for s in show_idx: if s not in sigma: continue pix", "vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2])", "types :param show: initialize and start the render window 
after adding the actors", "simulation domain \"\"\" (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData() imageData.SetDimensions(2, 2,", "0, 0), bbox_color=(1, 1, 1), cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color = bbox_color self.cam_props", "'_' if imprefix is None: imprefix = '' if impath is None: impath", "if step in self.data: return self.data[step] else: if self.storeafterread: self.data[step] = self._load_data(self.files[step]) return", "polydata = vtk.vtkPolyData() polydata.SetPoints(points) sources = vtk.vtkCubeSource() sources.Update() glyph = vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION", "for tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add actors to the renderer for actor", "type \"\"\" self.renderWindow.SetWindowName('step ' + str(int(step))) actors = self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd)", "= parse_args() # check if there is something to animate if not os.path.isdir(args.simdir):", ":param tau_colors: list with color per cell type :param tau_alpha: list with opacity", "i, t in enumerate(tau) if t in update_tau] update_func = lambda t, s:", "data get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk', '')) if steps is not None: self.files", "0.5, 0.5) dim = stepdata.GetDimensions() sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim, order='F') tau", "fn): \"\"\" Load vtk files \"\"\" reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data", "old_cam = self.renderer.GetActiveCamera(); cam = vtk.vtkCamera() if 'position' in self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition())", "__init__(self, update_func, tmax=1, save=False): self.timer_count = 0 self.update = update_func self.tmax = tmax", "= lambda t, s: self.visualize(steps[t], update_tau, show=False, save=s, 
bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath)", "# add actors to the renderer for actor in actors: self.renderer.AddActor(actor) return actors", "= [get_color(c) for c in args.colors] if not args.alpha: print \"Alpha values not", "parser = argparse.ArgumentParser() # parser.description(\"Animate 3D Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\", help=\"Simulation", "tau_list, tau_colors=None, tau_alpha=None, bbox=True, bnd=None): \"\"\" Create actors for a list of cell", "+= 1 class Visualizer3D(): \"\"\" Create visualizer object :param simdir: path to folder", "is None: tau_colors = [(0.5, 0.5, 0.5) for tau in tau_list] if tau_alpha", "folder containing vtk files :param steps: steps to visualize :param winsize: window size", "actor def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5, 0.5), opacity=1): \"\"\" Create actor for", "at once \"\"\" def __init__(self, simdir, steps=None, winsize=(800, 800), bg=(0, 0, 0), bbox_color=(1,", "in enumerate(tau_list)] # get bounding box wire frame if bbox: actors.append(self._get_box_actor()) if bnd", "# create visualizer v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not", "(see above) as keys and settings as values \"\"\" old_cam = self.renderer.GetActiveCamera(); cam", "# the vtkTimerCallback takes care of updating the visualzation class vtkTimerCallback(): def __init__(self,", "visualization window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\", help=\"read", "None: for tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add actors to the renderer for", "writer.Write() return actors def animate(self, tau, tau_colors=None, 
tau_alpha=None, steps=None, save=False, impath=None, imprefix=None, fps=5,", "parse_args(): parser = argparse.ArgumentParser() # parser.description(\"Animate 3D Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\",", "camera settings :param onthefly: read data on the fly instead of all at", "return None if data.GetPointData().HasArray('cell.type') != 1: print \"'cell.id' array missing from {} ->", "args.color_xmax if args.color_ymax is not None: bnd['y'] = args.color_ymax if args.color_zmax is not", "vtkTimerCallback(): def __init__(self, update_func, tmax=1, save=False): self.timer_count = 0 self.update = update_func self.tmax", "nargs=2, help=\"window size\", default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\", help=\"cell types to animate\",", "= self.timer_count if self.timer_count >= self.tmax: t = self.timer_count % self.tmax self.save =", "glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor()", "actor for a cell type \"\"\" if isinstance(color, basestring): # convert color to", "self.visualize(steps[0], tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is None: static_tau =", "glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1],", "nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args() def main(): args = parse_args() #", "the renderer :param step: step to visualize :param tau_list: list of cell types", "= 
\"0.1\" __maintainer__ = \"<NAME>\" # the vtkTimerCallback takes care of updating the", "color = (0.5, 0.5, 0.5) dim = stepdata.GetDimensions() sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma =", "vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create", "vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2]) return actor def _get_box_actor(self): \"\"\" Create and return", "vtk file every time it is used instead of \" \"keeping it in", "self.save = save def execute(self, obj, event): iren = obj win = iren.GetRenderWindow()", "settings for the renderer. Available options: - position - focal point - pitch", "None) or (len(tau_alpha) is not len(tau)): tau_alpha = [1 for t in tau]", "elif len(args.colors) < len(args.celltypes): print \"Number of colors does not match number of", "if stepdata is None: return [] else: actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i])", "\"\"\" Visualize a given step. 
:param step: step to visualize :param tau_list: list", "step to visualize :param tau_list: list of cell types :param show: initialize and", "interaction and timer self.renderWindowInteractor.Start() def get_color(name): \"\"\" Get color for matplotlib color name", "to visualize :param tau_list: list of cell types :param tau_colors: list with color", "in enumerate(tau) if t in update_tau] update_alpha = [tau_alpha[i] for i, t in", "args.saveim = True if args.saveim: if not args.outdir: args.outdir = args.simdir if not", "as VN from matplotlib import colors found_im2movie = True try: from im2movie import", "\"\"\" Get color for matplotlib color name \"\"\" cc = colors.ColorConverter() if name", "_get_box_actor(self): \"\"\" Create and return actor for wire frame box of the simulation", "{n : self._load_data(f) for n,f in self.files.iteritems()} else: self.data = {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])}", "missing from {} -> skip file\".format(fn) return None return reader.GetOutput() def visualize(self, step,", "initialize and start the render window after adding the actors to the renderer,", "from {} -> skip file\".format(fn) return None return reader.GetOutput() def visualize(self, step, tau_list,", "'save image {}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write() return actors def animate(self, tau, tau_colors=None, tau_alpha=None,", "fps: frames per second :param static_tau: static cell types that should not be", "\"\"\" old_cam = self.renderer.GetActiveCamera(); cam = vtk.vtkCamera() if 'position' in self.cam_props: cam.SetPosition(self.cam_props['position']) else:", "it in memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4", "VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim, order='F') show_idx = np.unique(sigma[tau == show_tau]) points = vtk.vtkPoints()", 
"\"\"\" Create actors for a list of cell types and add them to", "bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem), bnd_colors=bnd) # start animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha,", "= [(0.5, 0.5, 0.5) for tau in tau_list] if tau_alpha is None: tau_alpha", "static cell types that should not be updated during the animation \"\"\" if", "bbox: show bounding box :param tau_alpha: list with opacity per cell type :param", "tau_colors is None: tau_colors = [(0.5, 0.5, 0.5) for tau in tau_list] if", "update_tau, show=False, save=s, bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath) cb = vtkTimerCallback(update_func, len(steps), save)", "\"--winsize\", type=int, nargs=2, help=\"window size\", default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\", help=\"cell types", "imageData.SetSpacing(w, h, d) imageData.SetOrigin(0, 0, 0) mapper = vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <= 5:", "color {}'.format(tp,color) (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints() f = 0", "step, tau_list, show=False, save=False, impath=None, imprefix=None, bbox=True, tau_alpha=None, tau_colors=None, bnd=None): \"\"\" Visualize a", "args.color_xmin if args.color_ymin is not None: bnd['-y'] = args.color_ymin if args.color_zmin is not", "== 0: bnd = {} # set saving options if args.imprefix or args.outdir", "update_func = lambda t, s: self.visualize(steps[t], update_tau, show=False, save=s, bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix,", "class Visualizer3D(): \"\"\" Create visualizer object :param simdir: path to folder containing vtk", "\"Create output directory {}\".format(args.outdir) os.makedirs(args.outdir) if not args.imprefix: args.imprefix = \"frame\" # set", "t in args.celltypes] elif len(args.alpha) < len(args.celltypes): print \"Number 
of alpha values does", "mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2]) return actor def", "None and imprefix.endswith('_'): imprefix = imprefix + '_' if imprefix is None: imprefix", "color not specified - default to grey\" args.colors = [get_color(\"grey\") for t in", "renderer. Available options: - position - focal point - pitch If position and", "with color per cell type :param tau_alpha: list with opacity per cell type", "animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1, 1, 1), help=\"bounding box color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3,", "= (0.5, 0.5, 0.5) dim = stepdata.GetDimensions() sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim,", "sigma.reshape(dim, order='F') tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim, order='F') show_idx = np.unique(sigma[tau ==", "frames per second :param static_tau: static cell types that should not be updated", ":param show: initialize and start the render window after adding the actors to", "default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\", type=int, nargs=\"*\", help=\"cell types to animate\", required=True) parser.add_argument(\"-c\", \"--colors\",", "get_actors(self, step, tau_list, tau_colors=None, tau_alpha=None, bbox=True, bnd=None): \"\"\" Create actors for a list", "self.data = {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} # setup renderer self._set_renderer(winsize, bg) def _get_step(self,step): \"\"\"", "directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\", \"--movie\",", "path to store image :param imprefix: image prefix :param bbox: show 
bounding box", "ImportError: found_im2movie = False __author__ = \"<NAME>\" __copyright__ = \"Copyright 2016\" __credits__ =", "mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3) parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float,", "\" \"is not specified\") parser.add_argument(\"-f\", \"--fps\", type=float, default=5, help=\"frames per second\") parser.add_argument(\"-o\", \"--outdir\",", "- default to grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] elif len(args.colors)", "# start the interaction and timer self.renderWindowInteractor.Start() def get_color(name): \"\"\" Get color for", "if s not in sigma: continue pix = np.column_stack(np.where(sigma == s)) for p", "to a list of polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a PolyData", "= {} # set saving options if args.imprefix or args.outdir or args.movie: args.saveim", "__copyright__ = \"Copyright 2016\" __credits__ = \"<NAME>\" __license__ = \"MIT\" __version__ = \"0.1\"", "if args.color_zmin is not None: bnd['-z'] = args.color_zmin if len(bnd) == 0: bnd", "cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem), bnd_colors=bnd) # start animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps,", "is not None: self._modify_cam() if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save: w2i = vtk.vtkWindowToImageFilter()", "def _set_renderer(self, winsize, bg): \"\"\" Set up vtk renderer \"\"\" self.renderer = vtk.vtkRenderer()", "for t in tau] if (tau_alpha is None) or (len(tau_alpha) is not len(tau)):", "= args.color_zmax if args.color_xmin is not None: bnd['-x'] = args.color_xmin if args.color_ymin is", 
"vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper) return actor def", "= vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper) return actor def _load_data(self, fn): \"\"\"", "visualizer v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem), bnd_colors=bnd)", "None: return [] else: actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for i, tau", "adding the actors to the renderer, should not be used for animations :param", "t in enumerate(tau) if t in update_tau] update_func = lambda t, s: self.visualize(steps[t],", "tau if t not in static_tau] update_colors = [tau_colors[i] for i, t in", "actors with first the actors for tau_list followed by the bounding box (if", "self.storeafterread: self.data[step] = self._load_data(self.files[step]) return self.data[step] else: return self._load_data(self.files[step]) def _set_renderer(self, winsize, bg):", "args.alpha = [args.alpha for t in args.celltypes] elif len(args.alpha) < len(args.celltypes): print \"Number", "color[2]) return actor def _get_box_actor(self): \"\"\" Create and return actor for wire frame", "vtk.vtkImageData() imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w, h, d) imageData.SetOrigin(0, 0, 0) mapper = vtk.vtkDataSetMapper()", "action=\"store_true\", help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3) parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float,", "np import vtk from vtk.util import 
numpy_support as VN from matplotlib import colors", "help=\"colors or the cell types\") parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\", help=\"opacity of the cell", "renderer \"\"\" self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor", ":param bbox: show bounding box :returns: list of actors with first the actors", "points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y' in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z' in", "data = reader.GetOutput() if data.GetPointData().HasArray('cell.id') != 1: print \"'cell.id' array missing from {}", "def _modify_cam(self): \"\"\" Modify the camera settings for the renderer. Available options: -", "= [1 for t in tau] if steps is None: steps = self.files.keys()", "# set saving options if args.imprefix or args.outdir or args.movie: args.saveim = True", "def visualize(self, step, tau_list, show=False, save=False, impath=None, imprefix=None, bbox=True, tau_alpha=None, tau_colors=None, bnd=None): \"\"\"", "is None: impath = '.' 
writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print 'save image {}/{}{:03d}.png'.format(impath, imprefix,", "0.5), opacity=1): \"\"\" Create actor for a cell type \"\"\" if isinstance(color, basestring):", "tau_list, show=False, save=False, impath=None, imprefix=None, bbox=True, tau_alpha=None, tau_colors=None, bnd=None): \"\"\" Visualize a given", "\"\"\" self.renderWindow.SetWindowName('step ' + str(int(step))) actors = self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render()", "from matplotlib import colors found_im2movie = True try: from im2movie import makeMovie except", "number of cell types - default to opaque objects\" args.alpha = [1 for", "if args.color_zmax is not None: bnd['z'] = args.color_zmax if args.color_xmin is not None:", "win.GetRenderers().GetFirstRenderer() # remove all actors that will be updated for actor in self.update_actors:", "vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is not None and imprefix.endswith('_'): imprefix = imprefix +", "default=(-200, 200, 200), help=\"camera position\") parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100, 100, 50), help=\"camera focal", ":param tau_alpha: list with opacity per cell type :param bbox: show bounding box", "saving options if args.imprefix or args.outdir or args.movie: args.saveim = True if args.saveim:", "them to the renderer :param step: step to visualize :param tau_list: list of", "per second :param static_tau: static cell types that should not be updated during", "steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static) # create and store movie if args.movie", "cam_props: dictionary with camera settings :param onthefly: read data on the fly instead", "in actors: self.renderer.AddActor(actor) return actors def _modify_cam(self): \"\"\" Modify the camera settings for", "if t in update_tau] update_alpha = 
[tau_alpha[i] for i, t in enumerate(tau) if", "in args.celltypes] elif len(args.colors) == 1: args.colors = [get_color(args.colors[0]) for t in args.celltypes]", "#!/usr/bin/env python \"\"\"Visualizes data on a cubic lattice Built specifically to visualize the", "= lambda fn: int(fn.split('_')[-1].replace('.vtk', '')) if steps is not None: self.files = {get_num(f)", "get_num(f) in steps} else: self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir))}", ".5, p[2] - .5) polydata = vtk.vtkPolyData() polydata.SetPoints(points) sources = vtk.vtkCubeSource() sources.Update() glyph", "get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk', '')) if steps is not None: self.files =", "= storeafterread self.bnd_colors = bnd_colors # read data get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk',", "= vtk.vtkCamera() if 'position' in self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if 'focal point' in", "box wire frame if bbox: actors.append(self._get_box_actor()) if bnd is not None: for tp,color", "points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y' in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z'", ":param tau_alpha: list with opacity per cell type :param steps: steps (all steps", "parser.parse_args() def main(): args = parse_args() # check if there is something to", "simdir, steps=None, winsize=(800, 800), bg=(0, 0, 0), bbox_color=(1, 1, 1), cam_props=None, onthefly=False, storeafterread=True,", "self.data = {n : self._load_data(f) for n,f in self.files.iteritems()} else: self.data = {self.files.keys()[0]", "(w, h, d) = self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData() imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w, h,", "for i, t in enumerate(tau) if t in update_tau] update_func = lambda t,", "the 
simulation domain \"\"\" (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData() imageData.SetDimensions(2,", "def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5, 0.5), opacity=1): \"\"\" Create actor for a", "that should not be updated during the animation \"\"\" if (tau_colors is None)", "tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render() if self.cam_props is not None: self._modify_cam() if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start()", "= \"<NAME>\" __copyright__ = \"Copyright 2016\" __credits__ = \"<NAME>\" __license__ = \"MIT\" __version__", "self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor()", "for t in args.celltypes] elif len(args.alpha) == 1: args.alpha = [args.alpha for t", "\"Cell color not specified - default to grey\" args.colors = [get_color(\"grey\") for t", "specific step \"\"\" if step in self.data: return self.data[step] else: if self.storeafterread: self.data[step]", "and add them to the renderer :param step: step to visualize :param tau_list:", "{}'.format(tp,color) (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints() f = 0 if", "{get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps} else: self.files", "5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor =", "= timerId # start the interaction and timer self.renderWindowInteractor.Start() def get_color(name): \"\"\" Get", "given, they will be taken from the camera in the renderer. 
:param renderer:", "t = self.timer_count if self.timer_count >= self.tmax: t = self.timer_count % self.tmax self.save", "main(): args = parse_args() # check if there is something to animate if", "once \"\"\" def __init__(self, simdir, steps=None, winsize=(800, 800), bg=(0, 0, 0), bbox_color=(1, 1,", "\"'cell.id' array missing from {} -> skip file\".format(fn) return None if data.GetPointData().HasArray('cell.type') !=", "imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w, h, d) imageData.SetOrigin(0, 0, 0) mapper = vtk.vtkDataSetMapper() if", "name in colors.cnames: return cc.to_rgb(name) else: return cc.to_rgb(\"grey\") def parse_args(): parser = argparse.ArgumentParser()", "to grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] elif len(args.colors) == 1:", "get_color(name): \"\"\" Get color for matplotlib color name \"\"\" cc = colors.ColorConverter() if", "f = 0 if '-' in tp else 1 if 'x' in tp:", "shown when not specified) :param save: save view to png :param impath: path", "actors.append(self._get_bnd_actor(tp,color)) # add actors to the renderer for actor in actors: self.renderer.AddActor(actor) return", "position\") parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100, 100, 50), help=\"camera focal point\") # parser.add_argument(\"--campitch\", type=float,", "= \"Copyright 2016\" __credits__ = \"<NAME>\" __license__ = \"MIT\" __version__ = \"0.1\" __maintainer__", "in tau] if steps is None: steps = self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors =", "types - default to grey\" args.colors = [get_color(\"grey\") for t in args.celltypes] else:", "tau_colors = [(.5, .5, .5) for t in tau] if (tau_alpha is None)", "imprefix = imprefix + '_' if imprefix is None: imprefix = '' if", "if data.GetPointData().HasArray('cell.type') != 1: print \"'cell.id' array missing from {} -> skip file\".format(fn)", "<= 5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor = vtk.vtkActor() 
actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe()", "storeafterread self.bnd_colors = bnd_colors # read data get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk', ''))", "bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is None: static_tau = [] update_tau = [t", "import os import glob import sys import argparse import numpy as np import", "vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a mapper and actor mapper = vtk.vtkPolyDataMapper() if", "= [(.5, .5, .5) for t in tau] if (tau_alpha is None) or", "nargs=3, default=(0, 0, 0), help=\"background color\") parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200, 200, 200), help=\"camera", "save: save view to png :param impath: path to store image :param imprefix:", "= tmax self.update_actors = None self.save = save def execute(self, obj, event): iren", "or args.movie: args.saveim = True if args.saveim: if not args.outdir: args.outdir = args.simdir", "- default to opaque objects\" args.alpha = [1 for t in args.celltypes] bnd", "= {} if args.color_xmax is not None: bnd['x'] = args.color_xmax if args.color_ymax is", "args.color_ymax is not None: bnd['y'] = args.color_ymax if args.color_zmax is not None: bnd['z']", "on the fly instead of all at once \"\"\" def __init__(self, simdir, steps=None,", "create visualizer v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem),", "not args.colors: print \"Cell color not specified - default to grey\" args.colors =", "files :param steps: steps to visualize :param winsize: window size :param bg: background", "prefix :param fps: frames per second :param static_tau: static cell types that should", "1 if 'x' in tp: 
points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y' in tp:", "\"--movie\", action=\"store_true\", help=\"make movie after closing the visualization window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\")", "window after adding the actors to the renderer, should not be used for", "if impath is None: impath = '.' writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print 'save image", "help=\"background color\") parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200, 200, 200), help=\"camera position\") parser.add_argument(\"--camfocus\", type=float, nargs=3,", "colors and opacity if not args.colors: print \"Cell color not specified - default", "polygons.InsertNextCell(polygon) # Create a PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a", "self.timer_count += 1 class Visualizer3D(): \"\"\" Create visualizer object :param simdir: path to", "bg[1], bg[2]) self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def", "parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make movie after closing the", "by the bounding box (if applicable) \"\"\" # set default colors and opacity", "1: args.colors = [get_color(args.colors[0]) for t in args.celltypes] elif len(args.colors) < len(args.celltypes): print", "self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self, step, tau_list, tau_colors=None, tau_alpha=None, bbox=True, bnd=None): \"\"\" Create", "= [get_color(\"grey\") for t 
in args.celltypes] elif len(args.colors) == 1: args.colors = [get_color(args.colors[0])", "reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data = reader.GetOutput() if data.GetPointData().HasArray('cell.id') != 1:", "colors.ColorConverter() if name in colors.cnames: return cc.to_rgb(name) else: return cc.to_rgb(\"grey\") def parse_args(): parser", "= tau.reshape(dim, order='F') show_idx = np.unique(sigma[tau == show_tau]) points = vtk.vtkPoints() for s", "actor mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData) actor =", "parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\", help=\"colors or the cell types\") parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\",", "0), bbox_color=(1, 1, 1), cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color = bbox_color self.cam_props =", "in colors.cnames: return cc.to_rgb(name) else: return cc.to_rgb(\"grey\") def parse_args(): parser = argparse.ArgumentParser() #", "default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2, help=\"window size\", default=(800, 800)) parser.add_argument(\"-t\", \"--celltypes\",", "help=\"make movie windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make mp4 movie\") parser.add_argument(\"--color_xmin\",type=float, nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3)", "Visualizer3D(): \"\"\" Create visualizer object :param simdir: path to folder containing vtk files", "sources = vtk.vtkCubeSource() sources.Update() glyph = vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata) else:", "not None: bnd['-z'] = args.color_zmin if len(bnd) == 0: bnd = {} #", "return actor def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5, 0.5), opacity=1): \"\"\" Create actor", "of cell types 
and add them to the renderer :param step: step to", "'x' in tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y' in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0)", "points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1,", "vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0],", "values \"\"\" old_cam = self.renderer.GetActiveCamera(); cam = vtk.vtkCamera() if 'position' in self.cam_props: cam.SetPosition(self.cam_props['position'])", "# remove all actors that will be updated for actor in self.update_actors: ren.RemoveActor(actor)", "imageData = vtk.vtkImageData() imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w, h, d) imageData.SetOrigin(0, 0, 0) mapper", "cell types\") parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\", help=\"opacity of the cell types\") parser.add_argument(\"--static\", type=int,", "bounding box wire frame color :param cam_props: dictionary with camera settings :param onthefly:", "mapper = vtk.vtkDataSetMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor = vtk.vtkActor()", "in self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if 'focal point' in self.cam_props: cam.SetFocalPoint(self.cam_props['focal point']) else:", "parser.add_argument(\"-m\", \"--movie\", action=\"store_true\", help=\"make movie after closing the visualization window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie", "def main(): args = parse_args() # check if there is something to animate", 
"points.InsertNextPoint(0,f*h,d) elif 'z' in tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4)", "default=(0, 0, 0), help=\"background color\") parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200, 200, 200), help=\"camera position\")", "polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a mapper and actor mapper =", "if not os.path.isdir(args.simdir): sys.exit(\"Could not find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No vtk", "for t in args.celltypes] elif len(args.colors) < len(args.celltypes): print \"Number of colors does", "tau_list followed by the bounding box (if applicable) \"\"\" # set default colors", "to visualize the VTK files created by Morpheus \"\"\" import os import glob", "all data at once before the visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk file", "if not args.alpha: print \"Alpha values not specified - default to opaque objects\"", "Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2, help=\"window", "created by Morpheus \"\"\" import os import glob import sys import argparse import", "time it is used instead of \" \"keeping it in memory\") parser.add_argument(\"--win\", action=\"store_true\",", "args = parse_args() # check if there is something to animate if not", "_get_step(self,step): \"\"\" Retrieve vtk data for a specific step \"\"\" if step in", "0.5, 0.5) for tau in tau_list] if tau_alpha is None: tau_alpha = [1", "print \"'cell.id' array missing from {} -> skip file\".format(fn) return None return reader.GetOutput()", "not given, they will be taken from the camera in the 
renderer. :param", "if tau_alpha is None: tau_alpha = [1 for tau in tau_list] # get", "if args.color_xmin is not None: bnd['-x'] = args.color_xmin if args.color_ymin is not None:", "if self.timer_count >= self.tmax: t = self.timer_count % self.tmax self.save = False #", "will be taken from the camera in the renderer. :param renderer: vtk renderer", "for f in glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly: self.data = {n : self._load_data(f) for", "taken from the camera in the renderer. :param renderer: vtk renderer :param cam_props:", "start the interaction and timer self.renderWindowInteractor.Start() def get_color(name): \"\"\" Get color for matplotlib", "animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static) # create and", "ren = win.GetRenderers().GetFirstRenderer() # remove all actors that will be updated for actor", "in tau if t not in static_tau] update_colors = [tau_colors[i] for i, t", "start animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static) # create", "= [args.alpha for t in args.celltypes] elif len(args.alpha) < len(args.celltypes): print \"Number of", ":param save: save view to png :param impath: path to store image :param", "update_tau] update_func = lambda t, s: self.visualize(steps[t], update_tau, show=False, save=s, bbox=False, tau_alpha=update_alpha, tau_colors=update_colors,", "data on a cubic lattice Built specifically to visualize the VTK files created", "to png :param impath: path to store image :param imprefix: image prefix :param", "= False __author__ = \"<NAME>\" __copyright__ = \"Copyright 2016\" __credits__ = \"<NAME>\" __license__", "elif 'y' in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) 
points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z' in tp: points.InsertNextPoint(0,0,f*d)", "as values \"\"\" old_cam = self.renderer.GetActiveCamera(); cam = vtk.vtkCamera() if 'position' in self.cam_props:", "t in update_tau] update_func = lambda t, s: self.visualize(steps[t], update_tau, show=False, save=s, bbox=False,", "opacity=1): \"\"\" Create actor for a cell type \"\"\" if isinstance(color, basestring): #", ":param imprefix: image prefix :param bbox: show bounding box :param tau_alpha: list with", "shown if this \" \"is not specified\") parser.add_argument(\"-f\", \"--fps\", type=float, default=5, help=\"frames per", "number of cell types - default to grey\" args.colors = [get_color(\"grey\") for t", "in update_tau] update_func = lambda t, s: self.visualize(steps[t], update_tau, show=False, save=s, bbox=False, tau_alpha=update_alpha,", "200, 200), help=\"camera position\") parser.add_argument(\"--camfocus\", type=float, nargs=3, default=(100, 100, 50), help=\"camera focal point\")", "frame color :param cam_props: dictionary with camera settings :param onthefly: read data on", "reader.GetOutput() if data.GetPointData().HasArray('cell.id') != 1: print \"'cell.id' array missing from {} -> skip", "make a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) # Add", "opaque objects\" args.alpha = [1 for t in args.celltypes] elif len(args.alpha) == 1:", "point\") # parser.add_argument(\"--campitch\", type=float, default=, help=\"camera pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps to animate,", "options if args.imprefix or args.outdir or args.movie: args.saveim = True if args.saveim: if", "the vtkTimerCallback takes care of updating the visualzation class vtkTimerCallback(): def __init__(self, update_func,", "\"<NAME>\" # the vtkTimerCallback takes care of updating the visualzation class vtkTimerCallback(): def", "nargs=3, 
default=(1, 1, 1), help=\"bounding box color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0, 0, 0),", "with color per cell type \"\"\" self.renderWindow.SetWindowName('step ' + str(int(step))) actors = self.get_actors(step,", ":param onthefly: read data on the fly instead of all at once \"\"\"", ": self._load_data(f) for n,f in self.files.iteritems()} else: self.data = {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])} #", "__init__(self, simdir, steps=None, winsize=(800, 800), bg=(0, 0, 0), bbox_color=(1, 1, 1), cam_props=None, onthefly=False,", "tau_colors=None, tau_alpha=None, bbox=True, bnd=None): \"\"\" Create actors for a list of cell types", "if not args.imprefix: args.imprefix = \"frame\" # set camera cam_props = {'position': args.camposition,", "tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type')) tau = tau.reshape(dim, order='F') show_idx = np.unique(sigma[tau == show_tau]) points", "per cell type :param tau_colors: list with color per cell type \"\"\" self.renderWindow.SetWindowName('step", "default=(1, 1, 1), help=\"bounding box color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0, 0, 0), help=\"background", "or args.outdir or args.movie: args.saveim = True if args.saveim: if not args.outdir: args.outdir", "t in args.celltypes] elif len(args.alpha) == 1: args.alpha = [args.alpha for t in", "visualize the VTK files created by Morpheus \"\"\" import os import glob import", "mapper.SetInputData(imageData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor def _get_actor_for_tau(self,", "# get new actors actors = self.update(t, self.save) self.update_actors = actors self.timer_count +=", "= vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData) actor = 
vtk.vtkActor() actor.SetMapper(mapper)", "5: mapper.SetInput(imageData) else: mapper.SetInputData(imageData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return", "vtk files found in {}\".format(args.simdir)) # set colors and opacity if not args.colors:", "if not args.outdir: args.outdir = args.simdir if not os.path.isdir(args.outdir): print \"Create output directory", "<= 5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor", "os.path.isdir(args.outdir): print \"Create output directory {}\".format(args.outdir) os.makedirs(args.outdir) if not args.imprefix: args.imprefix = \"frame\"", "if 'x' in tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y' in tp: points.InsertNextPoint(0,f*h,0)", "default=, help=\"camera pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps to animate, all steps will be", "store movie if args.movie and found_im2movie: if args.moviedir is None: args.moviedir = args.outdir", "t = self.timer_count % self.tmax self.save = False # get new actors actors", "self.bbox_color = bbox_color self.cam_props = cam_props self.storeafterread = storeafterread self.bnd_colors = bnd_colors #", "len(args.alpha) < len(args.celltypes): print \"Number of alpha values does not match number of", "imprefix: image prefix :param fps: frames per second :param static_tau: static cell types", "color in colors.cnames: color = get_color(color) else: color = (0.5, 0.5, 0.5) dim", "from the camera in the renderer. 
:param renderer: vtk renderer :param cam_props: dictionary", "= np.column_stack(np.where(sigma == s)) for p in pix: points.InsertNextPoint(p[0] - .5, p[1] -", "or the cell types\") parser.add_argument(\"-a\", \"--alpha\", type=float, nargs=\"*\", help=\"opacity of the cell types\")", "True try: from im2movie import makeMovie except ImportError: found_im2movie = False __author__ =", "print 'add boundary for {} with color {}'.format(tp,color) (w, h, d) = self.data[self.data.keys()[0]].GetDimensions()", "colors.cnames: return cc.to_rgb(name) else: return cc.to_rgb(\"grey\") def parse_args(): parser = argparse.ArgumentParser() # parser.description(\"Animate", "cell type :param steps: steps (all steps are shown when not specified) :param", "path to folder containing vtk files :param steps: steps to visualize :param winsize:", "actors to the renderer for actor in actors: self.renderer.AddActor(actor) return actors def _modify_cam(self):", "None: bnd['-y'] = args.color_ymin if args.color_zmin is not None: bnd['-z'] = args.color_zmin if", "\"\"\" self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow = vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor =", "a mapper and actor mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData) else:", "used for animations :param save: save view to png :param impath: path to", "= [] update_tau = [t for t in tau if t not in", "after closing the visualization window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie name\")", "if 'position' in self.cam_props: cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if 'focal point' in self.cam_props: cam.SetFocalPoint(self.cam_props['focal", "the render window after adding the actors to the renderer, should not be", "self.timer_count >= 
self.tmax: t = self.timer_count % self.tmax self.save = False # get", "object :param simdir: path to folder containing vtk files :param steps: steps to", "help=\"camera pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps to animate, all steps will be shown", "if self.cam_props is not None: self._modify_cam() if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save: w2i", "= '' if impath is None: impath = '.' writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step)) print", "updated during animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1, 1, 1), help=\"bounding box color\") parser.add_argument(\"--bgcolor\",", "_get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5, 0.5), opacity=1): \"\"\" Create actor for a cell", "step \"\"\" if step in self.data: return self.data[step] else: if self.storeafterread: self.data[step] =", "step) writer.Write() return actors def animate(self, tau, tau_colors=None, tau_alpha=None, steps=None, save=False, impath=None, imprefix=None,", "\"\"\" Animate simulation results :param tau: list of cell types :param tau_colors: list", "self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd) self.renderWindow.Render() if self.cam_props is not None: self._modify_cam() if", "vtk files \"\"\" reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data = reader.GetOutput() if", "in update_tau] update_alpha = [tau_alpha[i] for i, t in enumerate(tau) if t in", "= np.unique(sigma[tau == show_tau]) points = vtk.vtkPoints() for s in show_idx: if s", "bnd = {} # set saving options if args.imprefix or args.outdir or args.movie:", "renderer: vtk renderer :param cam_props: dictionary with options (see above) as keys and", "show=False, save=False, impath=None, imprefix=None, bbox=True, tau_alpha=None, tau_colors=None, bnd=None): \"\"\" Visualize a given step.", "help=\"bounding 
box color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0, 0, 0), help=\"background color\") parser.add_argument(\"--camposition\", type=float,", "vtk data for a specific step \"\"\" if step in self.data: return self.data[step]", "and found_im2movie: if args.moviedir is None: args.moviedir = args.outdir if args.moviename is None:", "i, tau in enumerate(tau_list)] # get bounding box wire frame if bbox: actors.append(self._get_box_actor())", "show_idx: if s not in sigma: continue pix = np.column_stack(np.where(sigma == s)) for", "lattice Built specifically to visualize the VTK files created by Morpheus \"\"\" import", "parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\", help=\"Simulation folder\") parser.add_argument(\"-w\", \"--winsize\", type=int, nargs=2, help=\"window size\", default=(800,", "color :param cam_props: dictionary with camera settings :param onthefly: read data on the", "args.moviedir, args.fps, win=args.win, tomp4=args.mp4) elif not found_im2movie: print \"WARNING: Movie generation is turned", "impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static) # create and store movie if args.movie and found_im2movie:", ":param tau_list: list of cell types :param show: initialize and start the render", "pix: points.InsertNextPoint(p[0] - .5, p[1] - .5, p[2] - .5) polydata = vtk.vtkPolyData()", "type=float, default=, help=\"camera pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps to animate, all steps will", "self.data[self.data.keys()[0]].GetDimensions() imageData = vtk.vtkImageData() imageData.SetDimensions(2, 2, 2) imageData.SetSpacing(w, h, d) imageData.SetOrigin(0, 0, 0)", "type :param bbox: show bounding box :returns: list of actors with first the", "and focal point are not given, they will be taken from the camera", "nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args() def main(): args = parse_args() # check if", 
"vtkTimerCallback(update_func, len(steps), save) if len(actors) > 0: cb.update_actors = [actors[tau.index(t)] for t in", "if vtk.VTK_MAJOR_VERSION <= 5: mapper.SetInput(polygonPolyData) else: mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1],", "= vtk.vtkRenderWindow() self.renderWindow.AddRenderer(self.renderer); self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self, step, tau_list,", "list of cell types :param tau_colors: list with color per cell type :param", "sys.exit(\"No vtk files found in {}\".format(args.simdir)) # set colors and opacity if not", "after adding the actors to the renderer, should not be used for animations", "first the actors for tau_list followed by the bounding box (if applicable) \"\"\"", "points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d) points.InsertNextPoint(0,h,f*d) polygon = vtk.vtkPolygon() polygon.GetPointIds().SetNumberOfIds(4) # make a quad polygon.GetPointIds().SetId(0,", "frame box of the simulation domain \"\"\" (w, h, d) = self.data[self.data.keys()[0]].GetDimensions() imageData", "== 1: args.alpha = [args.alpha for t in args.celltypes] elif len(args.alpha) < len(args.celltypes):", "for tau in tau_list] if tau_alpha is None: tau_alpha = [1 for tau", "'-' in tp else 1 if 'x' in tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d)", "Create actor for a cell type \"\"\" if isinstance(color, basestring): # convert color", "order='F') show_idx = np.unique(sigma[tau == show_tau]) points = vtk.vtkPoints() for s in show_idx:", "help=\"opacity of the cell types\") parser.add_argument(\"--static\", type=int, nargs=\"*\", help=\"static cell types (will NOT", "position and focal point are not 
given, they will be taken from the", "list of cell types and add them to the renderer :param step: step", "len(steps), save) if len(actors) > 0: cb.update_actors = [actors[tau.index(t)] for t in tau", "win = iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer() # remove all actors that will be", "bbox=False, tau_alpha=update_alpha, tau_colors=update_colors, imprefix=imprefix, impath=impath) cb = vtkTimerCallback(update_func, len(steps), save) if len(actors) >", "glyph = vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <= 5: glyph.SetInput(polydata) else: glyph.SetInputData(polydata) glyph.SetSourceConnection(sources.GetOutputPort()) glyph.ScalingOff() glyph.Update()", "tau_alpha=None, steps=None, save=False, impath=None, imprefix=None, fps=5, static_tau=None): \"\"\" Animate simulation results :param tau:", "opacity per cell type :param bbox: show bounding box :returns: list of actors", "simulation results :param tau: list of cell types :param tau_colors: list with color", "s)) for p in pix: points.InsertNextPoint(p[0] - .5, p[1] - .5, p[2] -", "static_tau] else: cb.update_actors = [] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId", "care of updating the visualzation class vtkTimerCallback(): def __init__(self, update_func, tmax=1, save=False): self.timer_count", "len(actors) > 0: cb.update_actors = [actors[tau.index(t)] for t in tau if t not", "static_tau] update_colors = [tau_colors[i] for i, t in enumerate(tau) if t in update_tau]", "be taken from the camera in the renderer. 
:param renderer: vtk renderer :param", "= self.visualize(steps[0], tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha, tau_colors=tau_colors,bnd=self.bnd_colors) if static_tau is None: static_tau", "every time it is used instead of \" \"keeping it in memory\") parser.add_argument(\"--win\",", "Create actors for a list of cell types and add them to the", "[] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps))) cb.timerId = timerId # start", "type=int, nargs=\"*\", help=\"cell types to animate\", required=True) parser.add_argument(\"-c\", \"--colors\", type=str, nargs=\"*\", help=\"colors or", ": f for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps} else: self.files =", "\"\"\" cc = colors.ColorConverter() if name in colors.cnames: return cc.to_rgb(name) else: return cc.to_rgb(\"grey\")", "values not specified - default to opaque objects\" args.alpha = [1 for t", "matplotlib color name \"\"\" cc = colors.ColorConverter() if name in colors.cnames: return cc.to_rgb(name)", "a list of polygons polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a PolyData polygonPolyData", "skip file\".format(fn) return None return reader.GetOutput() def visualize(self, step, tau_list, show=False, save=False, impath=None,", "= {'position': args.camposition, 'focal point': args.camfocus} # create visualizer v = Visualizer3D(args.simdir, winsize=args.winsize,", "cam_props: dictionary with options (see above) as keys and settings as values \"\"\"", "= [get_color(args.colors[0]) for t in args.celltypes] elif len(args.colors) < len(args.celltypes): print \"Number of", "help=\"make movie after closing the visualization window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\") parser.add_argument(\"--moviename\", type=str,", "bbox_color self.cam_props = cam_props self.storeafterread = storeafterread self.bnd_colors = 
bnd_colors # read data", "self.timer_count % self.tmax self.save = False # get new actors actors = self.update(t,", "in tau_list] # get actors stepdata = self._get_step(step) if stepdata is None: return", "not None: self._modify_cam() if show: self.renderWindowInteractor.Initialize() self.renderWindowInteractor.Start() if save: w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow)", "onthefly=(not args.readall), storeafterread=(not args.savemem), bnd_colors=bnd) # start animation v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps, save=args.saveim,", "applicable) \"\"\" # set default colors and opacity when they are not specified", "before the visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk file every time it is", "tau_colors = [(0.5, 0.5, 0.5) for tau in tau_list] if tau_alpha is None:", "find {}\".format(args.simdir)) elif len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No vtk files found in {}\".format(args.simdir)) #", "for t in args.celltypes] bnd = {} if args.color_xmax is not None: bnd['x']", "tau_alpha = [1 for tau in tau_list] # get actors stepdata = self._get_step(step)", "types - default to opaque objects\" args.alpha = [1 for t in args.celltypes]", "all steps will be shown if this \" \"is not specified\") parser.add_argument(\"-f\", \"--fps\",", "default=5, help=\"frames per second\") parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image", "type=float, nargs=3, default=(100, 100, 50), help=\"camera focal point\") # parser.add_argument(\"--campitch\", type=float, default=, help=\"camera", "(if applicable) \"\"\" # set default colors and opacity when they are not", "vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self, step, tau_list, 
tau_colors=None, tau_alpha=None, bbox=True, bnd=None): \"\"\"", "cam.SetPosition(self.cam_props['position']) else: cam.SetPosition(old_cam.GetPosition()) if 'focal point' in self.cam_props: cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if", "self.renderWindowInteractor = vtk.vtkRenderWindowInteractor() self.renderWindowInteractor.SetRenderWindow(self.renderWindow) self.renderWindow.SetSize(winsize[0], winsize[1]) def get_actors(self, step, tau_list, tau_colors=None, tau_alpha=None, bbox=True,", "type \"\"\" if isinstance(color, basestring): # convert color to rgb string if color", "be used for animations :param save: save view to png :param impath: path", "else: mapper.SetInputData(polygonPolyData) actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2]) return actor def _get_box_actor(self):", "bg=args.bgcolor, bbox_color=args.bboxcolor, cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem), bnd_colors=bnd) # start animation v.animate(args.celltypes, tau_colors=args.colors,", "# Create a mapper and actor mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION <= 5:", "add them to the renderer :param step: step to visualize :param tau_list: list", "0), help=\"background color\") parser.add_argument(\"--camposition\", type=float, nargs=3, default=(-200, 200, 200), help=\"camera position\") parser.add_argument(\"--camfocus\", type=float,", "found_im2movie = True try: from im2movie import makeMovie except ImportError: found_im2movie = False", "file every time it is used instead of \" \"keeping it in memory\")", "cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print 'add boundary for", "settings as values \"\"\" old_cam = self.renderer.GetActiveCamera(); cam = vtk.vtkCamera() if 'position' in", "for animations 
:param save: save view to png :param impath: path to store", "fps=args.fps, static_tau=args.static) # create and store movie if args.movie and found_im2movie: if args.moviedir", "polygons = vtk.vtkCellArray() polygons.InsertNextCell(polygon) # Create a PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons)", "of updating the visualzation class vtkTimerCallback(): def __init__(self, update_func, tmax=1, save=False): self.timer_count =", "0.5, 0.5), opacity=1): \"\"\" Create actor for a cell type \"\"\" if isinstance(color,", "mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(glyph.GetOutputPort()) actor = vtk.vtkActor() actor.GetProperty().SetOpacity(opacity) actor.GetProperty().SetColor(color[0], color[1], color[2]) actor.SetMapper(mapper) return", "s not in sigma: continue pix = np.column_stack(np.where(sigma == s)) for p in", "files \"\"\" reader = vtk.vtkStructuredPointsReader() reader.SetFileName(fn) reader.ReadAllScalarsOn() reader.Update() data = reader.GetOutput() if data.GetPointData().HasArray('cell.id')", "directory {}\".format(args.outdir) os.makedirs(args.outdir) if not args.imprefix: args.imprefix = \"frame\" # set camera cam_props", "animations :param save: save view to png :param impath: path to store image", "len(glob.glob(\"{}/*.vtk\".format(args.simdir))) == 0: sys.exit(\"No vtk files found in {}\".format(args.simdir)) # set colors and", "actors actors = self.update(t, self.save) self.update_actors = actors self.timer_count += 1 class Visualizer3D():", "elif not found_im2movie: print \"WARNING: Movie generation is turned of because im2movie was", "step: step to visualize :param tau_list: list of cell types :param show: initialize", "for t in tau] if steps is None: steps = self.files.keys() steps.sort() self.renderWindowInteractor.Initialize()", "Create and return actor for wire frame box of the simulation domain \"\"\"", "steps=None, save=False, impath=None, imprefix=None, fps=5, 
static_tau=None): \"\"\" Animate simulation results :param tau: list", "= argparse.ArgumentParser() # parser.description(\"Animate 3D Morpheus simulations\") parser.add_argument(\"-i\", \"--simdir\", type=str, default=\"./\", help=\"Simulation folder\")", "__version__ = \"0.1\" __maintainer__ = \"<NAME>\" # the vtkTimerCallback takes care of updating", "polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a mapper and actor mapper = vtk.vtkPolyDataMapper() if vtk.VTK_MAJOR_VERSION", "at once before the visualization starts\") parser.add_argument(\"--savemem\", action=\"store_true\", help=\"reread vtk file every time", "be updated during animation)\") parser.add_argument(\"--bboxcolor\", type=float, nargs=3, default=(1, 1, 1), help=\"bounding box color\")", "event): iren = obj win = iren.GetRenderWindow() ren = win.GetRenderers().GetFirstRenderer() # remove all", "not match number of cell types - default to opaque objects\" args.alpha =", "actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2]) actor.GetProperty().SetRepresentationToWireframe() return actor def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5, 0.5),", "onthefly=False, storeafterread=True, bnd_colors=None): self.bbox_color = bbox_color self.cam_props = cam_props self.storeafterread = storeafterread self.bnd_colors", "args.outdir, args.moviedir, args.fps, win=args.win, tomp4=args.mp4) elif not found_im2movie: print \"WARNING: Movie generation is", "help=\"camera focal point\") # parser.add_argument(\"--campitch\", type=float, default=, help=\"camera pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\", help=\"steps", "store image :param imprefix: image prefix :param fps: frames per second :param static_tau:", "sys import argparse import numpy as np import vtk from vtk.util import numpy_support", "self.timer_count if self.timer_count >= self.tmax: t = self.timer_count % self.tmax self.save = False", "args.camposition, 
'focal point': args.camfocus} # create visualizer v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor,", "above) as keys and settings as values \"\"\" old_cam = self.renderer.GetActiveCamera(); cam =", "missing from {} -> skip file\".format(fn) return None if data.GetPointData().HasArray('cell.type') != 1: print", "not args.outdir: args.outdir = args.simdir if not os.path.isdir(args.outdir): print \"Create output directory {}\".format(args.outdir)", "up vtk renderer \"\"\" self.renderer = vtk.vtkRenderer() self.renderer.SetBackground(bg[0], bg[1], bg[2]) self.renderWindow = vtk.vtkRenderWindow()", "Create a PolyData polygonPolyData = vtk.vtkPolyData() polygonPolyData.SetPoints(points) polygonPolyData.SetPolys(polygons) # Create a mapper and", "args.imprefix or args.outdir or args.movie: args.saveim = True if args.saveim: if not args.outdir:", "point' in self.cam_props: cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam)", "for t in args.celltypes] else: args.colors = [get_color(c) for c in args.colors] if", "dictionary with options (see above) as keys and settings as values \"\"\" old_cam", "per cell type \"\"\" self.renderWindow.SetWindowName('step ' + str(int(step))) actors = self.get_actors(step, tau_list, tau_colors,", "[self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for i, tau in enumerate(tau_list)] # get bounding box", "generation is turned of because im2movie was not found\" if __name__ == \"__main__\":", "will be updated for actor in self.update_actors: ren.RemoveActor(actor) # set t to correct", "actors.append(self._get_box_actor()) if bnd is not None: for tp,color in bnd.iteritems(): actors.append(self._get_bnd_actor(tp,color)) # add", "actor def _get_box_actor(self): \"\"\" Create and return actor for wire frame box of", "vtk.util import 
numpy_support as VN from matplotlib import colors found_im2movie = True try:", "camera cam_props = {'position': args.camposition, 'focal point': args.camfocus} # create visualizer v =", "bnd=None): \"\"\" Create actors for a list of cell types and add them", "= bbox_color self.cam_props = cam_props self.storeafterread = storeafterread self.bnd_colors = bnd_colors # read", "containing vtk files :param steps: steps to visualize :param winsize: window size :param", "not match number of cell types - default to grey\" args.colors = [get_color(\"grey\")", "tau_colors=None, bnd=None): \"\"\" Visualize a given step. :param step: step to visualize :param", "used instead of \" \"keeping it in memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie windows", "specified - default to opaque objects\" args.alpha = [1 for t in args.celltypes]", "update_tau] update_alpha = [tau_alpha[i] for i, t in enumerate(tau) if t in update_tau]", "w2i = vtk.vtkWindowToImageFilter() w2i.SetInput(self.renderWindow) w2i.Update() writer = vtk.vtkPNGWriter() writer.SetInputConnection(w2i.GetOutputPort()) if imprefix is not", "does not match number of cell types - default to grey\" args.colors =", "= [actors[tau.index(t)] for t in tau if t not in static_tau] else: cb.update_actors", "settings :param onthefly: read data on the fly instead of all at once", "\"keeping it in memory\") parser.add_argument(\"--win\", action=\"store_true\", help=\"make movie windows compatible\") parser.add_argument(\"--mp4\", action=\"store_true\", help=\"make", "type=float, default=5, help=\"frames per second\") parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str,", "print \"Create output directory {}\".format(args.outdir) os.makedirs(args.outdir) if not args.imprefix: args.imprefix = \"frame\" #", "show: initialize and start the render window after adding the actors to the", "update_alpha = [tau_alpha[i] for i, t 
in enumerate(tau) if t in update_tau] update_func", "type=str, help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\", help=\"read all data at once before the visualization", "camera in the renderer. :param renderer: vtk renderer :param cam_props: dictionary with options", "len(tau)): tau_colors = [(.5, .5, .5) for t in tau] if (tau_alpha is", "list with color per cell type \"\"\" self.renderWindow.SetWindowName('step ' + str(int(step))) actors =", "= args.color_zmin if len(bnd) == 0: bnd = {} # set saving options", "\"\"\" import os import glob import sys import argparse import numpy as np", "self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir))} if not onthefly: self.data", "for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps} else: self.files = {get_num(f) :", "\"frame\" # set camera cam_props = {'position': args.camposition, 'focal point': args.camfocus} # create", "args.celltypes] elif len(args.alpha) == 1: args.alpha = [args.alpha for t in args.celltypes] elif", "in sigma: continue pix = np.column_stack(np.where(sigma == s)) for p in pix: points.InsertNextPoint(p[0]", "instead of all at once \"\"\" def __init__(self, simdir, steps=None, winsize=(800, 800), bg=(0,", "are not specified if tau_colors is None: tau_colors = [(0.5, 0.5, 0.5) for", "= args.color_ymin if args.color_zmin is not None: bnd['-z'] = args.color_zmin if len(bnd) ==", "{}/{}{:03d}.png'.format(impath, imprefix, step) writer.Write() return actors def animate(self, tau, tau_colors=None, tau_alpha=None, steps=None, save=False,", "(0.5, 0.5, 0.5) dim = stepdata.GetDimensions() sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id')) sigma = sigma.reshape(dim, order='F')", "self.data[step] else: return self._load_data(self.files[step]) def _set_renderer(self, winsize, bg): \"\"\" Set up vtk renderer", "setup renderer self._set_renderer(winsize, bg) def _get_step(self,step): \"\"\" Retrieve vtk data for a specific", 
"save def execute(self, obj, event): iren = obj win = iren.GetRenderWindow() ren =", "= vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color[0], color[1], color[2]) return actor def _get_box_actor(self): \"\"\" Create and", "not specified\") parser.add_argument(\"-f\", \"--fps\", type=float, default=5, help=\"frames per second\") parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output", "tau_alpha is None: tau_alpha = [1 for tau in tau_list] # get actors", "(tau_alpha is None) or (len(tau_alpha) is not len(tau)): tau_alpha = [1 for t", "== show_tau]) points = vtk.vtkPoints() for s in show_idx: if s not in", "image :param imprefix: image prefix :param bbox: show bounding box :param tau_alpha: list", "box color\") parser.add_argument(\"--bgcolor\", type=float, nargs=3, default=(0, 0, 0), help=\"background color\") parser.add_argument(\"--camposition\", type=float, nargs=3,", "for i, t in enumerate(tau) if t in update_tau] update_alpha = [tau_alpha[i] for", "this \" \"is not specified\") parser.add_argument(\"-f\", \"--fps\", type=float, default=5, help=\"frames per second\") parser.add_argument(\"-o\",", "parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image prefix\") parser.add_argument(\"-s\", \"--saveim\", action=\"store_true\", help=\"save images\") parser.add_argument(\"-m\", \"--movie\", action=\"store_true\",", "cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch']) self.renderer.SetActiveCamera(cam) def _get_bnd_actor(self,tp,color): print", "(w, h, d) = self.data[self.data.keys()[0]].GetDimensions() points = vtk.vtkPoints() f = 0 if '-'", "continue pix = np.column_stack(np.where(sigma == s)) for p in pix: points.InsertNextPoint(p[0] - .5,", "tau_colors=update_colors, imprefix=imprefix, impath=impath) cb = vtkTimerCallback(update_func, len(steps), save) if len(actors) > 0: cb.update_actors", "50), 
help=\"camera focal point\") # parser.add_argument(\"--campitch\", type=float, default=, help=\"camera pitch\") parser.add_argument(\"--steps\", type=int, nargs=\"*\",", "= vtk.vtkPolyData() polydata.SetPoints(points) sources = vtk.vtkCubeSource() sources.Update() glyph = vtk.vtkGlyph3D() if vtk.VTK_MAJOR_VERSION <=", "impath: path to store image :param imprefix: image prefix :param fps: frames per", "view to png :param impath: path to store image :param imprefix: image prefix", "the renderer, should not be used for animations :param save: save view to", "len(args.alpha) == 1: args.alpha = [args.alpha for t in args.celltypes] elif len(args.alpha) <", "followed by the bounding box (if applicable) \"\"\" # set default colors and", "tp: points.InsertNextPoint(f*w,0,0) points.InsertNextPoint(f*w,h,0) points.InsertNextPoint(f*w,h,d) points.InsertNextPoint(f*w,0,d) elif 'y' in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d)", "the visualization window\") parser.add_argument(\"--moviedir\", type=str, help=\"movie directory\") parser.add_argument(\"--moviename\", type=str, help=\"movie name\") parser.add_argument(\"--readall\", action=\"store_true\",", "update_func self.tmax = tmax self.update_actors = None self.save = save def execute(self, obj,", "2016\" __credits__ = \"<NAME>\" __license__ = \"MIT\" __version__ = \"0.1\" __maintainer__ = \"<NAME>\"", "bg: background color :param bbox_color: bounding box wire frame color :param cam_props: dictionary", "args.celltypes] bnd = {} if args.color_xmax is not None: bnd['x'] = args.color_xmax if", "parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args() def main(): args = parse_args() # check if there", "point are not given, they will be taken from the camera in the", "steps: steps to visualize :param winsize: window size :param bg: background color :param", "tau_list] # get actors stepdata = self._get_step(step) if 
stepdata is None: return []", "np.column_stack(np.where(sigma == s)) for p in pix: points.InsertNextPoint(p[0] - .5, p[1] - .5,", "opacity if not args.colors: print \"Cell color not specified - default to grey\"", "to store image :param imprefix: image prefix :param bbox: show bounding box :param", "not os.path.isdir(args.outdir): print \"Create output directory {}\".format(args.outdir) os.makedirs(args.outdir) if not args.imprefix: args.imprefix =", "'focal point' in self.cam_props: cam.SetFocalPoint(self.cam_props['focal point']) else: cam.SetFocalPoint(old_cam.GetFocalPoint()) if 'pitch' in self.cam_props: cam.Pitch(self.cam_props['pitch'])", "should not be updated during the animation \"\"\" if (tau_colors is None) or", "step. :param step: step to visualize :param tau_list: list of cell types :param", "\"\"\" if isinstance(color, basestring): # convert color to rgb string if color in", "type :param steps: steps (all steps are shown when not specified) :param save:", "self.save) self.update_actors = actors self.timer_count += 1 class Visualizer3D(): \"\"\" Create visualizer object", "should not be used for animations :param save: save view to png :param", "specifically to visualize the VTK files created by Morpheus \"\"\" import os import", "else: actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for i, tau in enumerate(tau_list)] #", "# make a quad polygon.GetPointIds().SetId(0, 0) polygon.GetPointIds().SetId(1, 1) polygon.GetPointIds().SetId(2, 2) polygon.GetPointIds().SetId(3, 3) #", "are shown when not specified) :param save: save view to png :param impath:", "not in static_tau] else: cb.update_actors = [] self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute) timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 /", "print \"Number of colors does not match number of cell types - default", "not None: bnd['y'] = args.color_ymax if args.color_zmax is not None: bnd['z'] = args.color_zmax", "per second\") 
parser.add_argument(\"-o\", \"--outdir\", type=str, help=\"output directory\") parser.add_argument(\"-p\", \"--imprefix\", type=str, help=\"image prefix\") parser.add_argument(\"-s\",", "args.colors = [get_color(\"grey\") for t in args.celltypes] else: args.colors = [get_color(c) for c", "import argparse import numpy as np import vtk from vtk.util import numpy_support as", "in tp: points.InsertNextPoint(0,f*h,0) points.InsertNextPoint(w,f*h,0) points.InsertNextPoint(w,f*h,d) points.InsertNextPoint(0,f*h,d) elif 'z' in tp: points.InsertNextPoint(0,0,f*d) points.InsertNextPoint(w,0,f*d) points.InsertNextPoint(w,h,f*d)", "nargs=3) parser.add_argument(\"--color_ymin\",type=float, nargs=3) parser.add_argument(\"--color_zmin\",type=float, nargs=3) parser.add_argument(\"--color_xmax\",type=float, nargs=3) parser.add_argument(\"--color_ymax\",type=float, nargs=3) parser.add_argument(\"--color_zmax\",type=float, nargs=3) return parser.parse_args()", "file\".format(fn) return None return reader.GetOutput() def visualize(self, step, tau_list, show=False, save=False, impath=None, imprefix=None,", "t in tau] if steps is None: steps = self.files.keys() steps.sort() self.renderWindowInteractor.Initialize() actors", "action=\"store_true\", help=\"reread vtk file every time it is used instead of \" \"keeping", ":param tau_colors: list with color per cell type \"\"\" self.renderWindow.SetWindowName('step ' + str(int(step)))", "renderer self._set_renderer(winsize, bg) def _get_step(self,step): \"\"\" Retrieve vtk data for a specific step" ]
[ "produce a random energy energies[j <= 0] = E[0].to('MeV').value energies[j >= len(E)-1] =", "raise ValueError('Unrecognized distance_model: {}'.format(dmtype)) return Source(config.source.name, config.source.model, parse_quantity(config.source.progenitor.mass), distance_model.distance(), time, luminosity, mean_energy, pinch)", "*= 2 print('Completed') return E_per_V * u.MeV / u.m**3 def initialize(config): \"\"\"Initialize a", "/ ( 4 * np.pi * dist**2) * np.ediff1d(t, to_end=(t[-1] - t[-2])) if", "pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False ) elif config.source.table.format.lower() == 'ascii': # ASCII will", "relative to core bounce. E : `numpy.ndarray` Sorted grid of neutrino energies flavor", "mean energy table. fitsfile = '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table = Table.read(fitsfile) time = sn_data_table['TIME'].to('s')", "index partitions (partitions are numpy array). \"\"\" nParts = x.size//n i_part = [", "by photons from SN neutrino interactions. Parameters ---------- time : float (units s)", "mean_energy else: flux = 0 return flux / u.s # Where the mean", "---------- time : float (units s) Time relative to core bounce. E :", "neutrinos # per second, elsewhere, returns zero. # flux = np.ediff1d(t, to_end=(t[-1] -", "numpy array). \"\"\" nParts = x.size//n i_part = [ np.arange( i*n, (i+1)*n )", "a particular neutrino flavor. The energies are generated via inverse transform sampling of", "pinch = {}, {}, {} if config.source.table.format.lower() == 'fits': # Open FITS file,", "astropy import units as u from astropy.table import Table from abc import ABC,", "mean_energy, where=(mean_energy > 0), out=np.zeros(len(luminosity))) else: if mean_energy > 0.: flux = luminosity", "# parameter (alpha) and mean energy table. 
fitsfile = '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table =", "progenitor_distance self.time = time self.luminosity = luminosity self.mean_energy = mean_energy self.pinch = pinch", "/ u.s # Where the mean energy is not zero, return rate in", "E: \\ gdtr(1., a + 1., (a + 1.) * (E / Ea))", "size <n if x.size is not multiple of n if len(i_part)*n != x.size:", "E.value).real def sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a random sample of neutrino", "import loggamma, gdtr from scipy.interpolate import PchipInterpolator class Source: def __init__(self, name, spectral_model,", "x.size is not multiple of n if len(i_part)*n != x.size: i_part += [", "sampled from the energy spectrum. \"\"\" cdf = self.energy_cdf(flavor, t, E) energies =", "relative to core bounce. E : `numpy.ndarray` Sorted grid of neutrino energies to", "= luminosity self.mean_energy = mean_energy self.pinch = pinch # Energy PDF function is", "contains a luminosity table and a pinching # parameter (alpha) and mean energy", "nearly all CCSN models. self.energy_pdf = lambda a, Ea, E: \\ np.exp((1 +", "progenitor_mass self.progenitor_distance = progenitor_distance self.time = time self.luminosity = luminosity self.mean_energy = mean_energy", "return E_per_V * u.MeV / u.m**3 def initialize(config): \"\"\"Initialize a Source model from", "Neutrino flavor. Returns ------- mean_energy : float Source mean energy (units of energy).", "path length per MeV, sorted according to parameter E n : int Maximum", "\"\"\"Return source time as numpy array. Returns ------- time : float Source time", "Returns ------- Source An initialized source model. \"\"\" # Dictionary of L, <E>,", "float Source number flux (unit-less, count of neutrinos). \"\"\" t = time.to(u.s).value luminosity", "given time. 
\"\"\" from __future__ import print_function, division from snewpy.neutrino import Flavor from", "flux def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF of the neutrino energy", "gdtr(1., a + 1., (a + 1.) * (E / Ea)) def parts_by_index(self,", ": `numpy.ndarray` Table of PDF values computed as a function of energy. \"\"\"", "* dist**2) * np.ediff1d(t, to_end=(t[-1] - t[-2])) if not flavor.is_electron: E_per_V *= 2", "& (Ea > 0) E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1,", "= self.get_pinch_parameter(t, flavor) Ea = self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t, (list, tuple, np.ndarray)): #", "axis=0) E_per_V *= H2O_in_ice / ( 4 * np.pi * dist**2) * np.ediff1d(t,", "mean_energy : float Source mean energy (units of energy). \"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) *", "time, E, flavor, photon_spectrum, mixing=None, n=1000): \"\"\"Compute the energy deposited in a cubic", "fl in col for col in sn_data_table.keys() ): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E =", "spectrum : `numpy.ndarray` Table of PDF values computed as a function of energy.", "self.get_mean_energy(t, flavor).value if isinstance(t, (list, tuple, np.ndarray)): flux = np.divide(luminosity, mean_energy, where=(mean_energy >", "= self.energy_cdf(flavor, t, E) energies = np.zeros(n, dtype=float) # Generate a random number", "name self.model = spectral_model self.progenitor_mass = progenitor_mass self.progenitor_distance = progenitor_distance self.time = time", "relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- luminosity :", "en = E[j] + (E[j+1] - E[j]) / (cdf[j+1] - cdf[j]) * (u[cut]", "Ea, E.value).real else: return self.energy_pdf(a, Ea, E.value).real def sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR):", "bounce. 
E : `numpy.ndarray` Sorted grid of neutrino energies flavor : :class:`asteria.neutrino.Flavor` Neutrino", "* (u[cut] - cdf[j]) energies[cut] = en return energies def photonic_energy_per_vol(self, time, E,", "self.name = name self.model = spectral_model self.progenitor_mass = progenitor_mass self.progenitor_distance = progenitor_distance self.time", "will # always return a number when numpy size is called on them,", "a given flavor. Parameters ---------- t : float Time relative to core bounce.", "dist**2) * np.ediff1d(t, to_end=(t[-1] - t[-2])) if not flavor.is_electron: E_per_V *= 2 print('Completed')", "4 * np.pi * dist**2) * np.ediff1d(t, to_end=(t[-1] - t[-2])) if not flavor.is_electron:", "to compute the energy PDF. Returns ------- spectrum : `numpy.ndarray` Table of PDF", "energies[j >= len(E)-1] = E[-1].to('MeV').value cut = (0 < j) & (j <", "0) & (Ea > 0) E_pdf = np.zeros( (Enu.size, t.size), dtype = float", "and a pinching # parameter (alpha) and mean energy table. fitsfile = '/'.join([config.abs_base_path,", "in the CCSN literature. For each species of neutrino one requires an estimate", "------- mean_energy : float Source mean energy (units of energy). \"\"\" return np.nan_to_num(self.mean_energy[flavor](t))", "Parameters ---------- t : float Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor`", "= E.to(u.MeV).value if Enu[0] == 0.: Enu[0] = 1e-10 # u.MeV a =", "of neutrino energies at some time t for a particular neutrino flavor. The", "flavor).value * phot, Enu, axis=0) E_per_V *= H2O_in_ice / ( 4 * np.pi", "to compute the energy PDF. n : int Number of energy samples to", "flux = 0 return flux / u.s # Where the mean energy is", "each species of neutrino one requires an estimate of the luminosity vs. 
time", "- loggamma(1 + a) + a * np.log(E) - \\ (1 + a)", "E = sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time, L,", "compare to the CDF # of the neutrino energy distribution at time t", "time : float (units s) Time relative to core bounce. E : `numpy.ndarray`", "module encapsulates the basic parameters of neutrino fluxes from supernovae as modeled in", "at time t. Parameters ---------- t : float Time relative to core bounce.", "luminosity, mean_energy, pinch = {}, {}, {} if config.source.table.format.lower() == 'fits': # Open", "one size-m array ( with m<n ) if there are remaining elements of", "return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching paramter alpha", "the CDF # of the neutrino energy distribution at time t u =", "to compute at once. A temporary numpy array of size n x time.size", "neutrino energy distribution. Parameters ---------- t : float Time relative to core bounce.", "scipy.interpolate import PchipInterpolator class Source: def __init__(self, name, spectral_model, progenitor_mass, progenitor_distance, time={}, luminosity={},", "a[a<0] = 0 cut = (a >= 0) & (Ea > 0) E_pdf", "------- spectrum : `numpy.ndarray` Table of PDF values computed as a function of", "== 0.: Enu[0] = 1e-10 # u.MeV a = self.get_pinch_parameter(t, flavor) Ea =", "= 1e-10 * u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist = self.progenitor_distance.to(u.m).value #", "* phot, Enu, axis=0) E_per_V *= H2O_in_ice / ( 4 * np.pi *", "__future__ import print_function, division from snewpy.neutrino import Flavor from .stellardist import FixedDistance, StellarDensity", "len(i_part)*n != x.size: i_part += [ np.arange( len(i_part)*n, x.size ) ] # Ensure", ": :class:`asteria.neutrino.Flavor` Neutrino flavor. 
Returns ------- pinch : float Source pinching parameter (unitless).", "core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. E : `numpy.ndarray` Sorted grid of", "and pinch parameter. # Use simple 1D linear interpolation t = time.to(u.s).value Enu", "flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity at time t for a given flavor. Parameters ----------", "pinching # parameter (alpha) and mean energy table. fitsfile = '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table", "distribution at time t u = np.random.uniform(n) j = np.searchsorted(cdf, u) # Linearly", "generated via inverse transform sampling of the CDF of the neutrino energy distribution.", "gdtr from scipy.interpolate import PchipInterpolator class Source: def __init__(self, name, spectral_model, progenitor_mass, progenitor_distance,", "L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif fl == 'NU_X_BAR':", "* np.log(1 + a) - loggamma(1 + a) + a * np.log(E) -", "= [ np.arange( i*n, (i+1)*n ) for i in range(nParts) ] # Generate", "not multiple of n if len(i_part)*n != x.size: i_part += [ np.arange( len(i_part)*n,", "Ea, E.value).real def sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a random sample of", "Source model from configuration parameters. Parameters ---------- config : :class:`asteria.config.Configuration` Configuration parameters used", "as well as the energy spectrum of the neutrinos at any given time.", "import Flavor from .stellardist import FixedDistance, StellarDensity from .config import parse_quantity from astropy", "config : :class:`asteria.config.Configuration` Configuration parameters used to create a Source. 
Returns ------- Source", "dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r, dr) elif dmtype == 'StellarDensity': # StellarDensity", "in the CDF to produce a random energy energies[j <= 0] = E[0].to('MeV').value", "Enu[0] = 1e-10 # u.MeV a = self.get_pinch_parameter(t, flavor) Ea = self.get_mean_energy(t, flavor).to(u.MeV).value", "------- luminosity : float Source luminosity (units of power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t)) *", "+ 1., (a + 1.) * (E / Ea)) def parts_by_index(self, x, n):", "partition always has 2 or more elements if len(i_part[-1]) < 2: i_part[-2] =", "{}, {} if config.source.table.format.lower() == 'fits': # Open FITS file, which contains a", "flux (unit-less, count of neutrinos). \"\"\" t = time.to(u.s).value luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value", ": `numpy.ndarray` Sorted grid of neutrino energies flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. photon_spectrum", "source pinching paramter alpha at time t for a given flavor. Parameters ----------", "& (j < len(E)-1) j = j[cut] en = E[j] + (E[j+1] -", "numpy array. Returns ------- time : float Source time profile (units of s).", "core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- pinch : float Source", "cdf[j]) * (u[cut] - cdf[j]) energies[cut] = en return energies def photonic_energy_per_vol(self, time,", "a * np.log(E) - \\ (1 + a) * np.log(Ea) - (1 +", "neutrino flavor. The energies are generated via inverse transform sampling of the CDF", "of n if len(i_part)*n != x.size: i_part += [ np.arange( len(i_part)*n, x.size )", "> 0) E_pdf = np.zeros( (Enu.size, t.size), dtype = float ) E_pdf[:, cut]", "energy sampling. 
self.energy_cdf = lambda a, Ea, E: \\ gdtr(1., a + 1.,", "x.size ) ] # Ensure that last partition always has 2 or more", "self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1, 1)) return E_pdf else: if Ea <=", "energies are generated via inverse transform sampling of the CDF of the neutrino", "from .stellardist import FixedDistance, StellarDensity from .config import parse_quantity from astropy import units", "= np.searchsorted(cdf, u) # Linearly interpolate in the CDF to produce a random", "sn_data_table.keys() ): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif fl", "config.source.table.format.lower() == 'fits': # Open FITS file, which contains a luminosity table and", "table and a pinching # parameter (alpha) and mean energy table. fitsfile =", "cubic meter of ice by photons from SN neutrino interactions. Parameters ---------- time", "energy and pinch parameter. # Use simple 1D linear interpolation t = time.to(u.s).value", "return flux def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF of the neutrino", "import print_function, division from snewpy.neutrino import Flavor from .stellardist import FixedDistance, StellarDensity from", "of size <n if x.size is not multiple of n if len(i_part)*n !=", "very memory inefficient. Returns ------- E_per_V Energy per m**3 of ice deposited by", "np.zeros( (Enu.size, t.size), dtype = float ) E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1),", "mixing(self) print('Beginning {0} simulation... {1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='') # The following two lines", "<E>, and alpha versus time, keyed by neutrino flavor. luminosity, mean_energy, pinch =", "created and can be very memory inefficient. 
Returns ------- E_per_V Energy per m**3", "= name self.model = spectral_model self.progenitor_mass = progenitor_mass self.progenitor_distance = progenitor_distance self.time =", "progenitor_distance, time={}, luminosity={}, mean_energy={}, pinch={}): self.name = name self.model = spectral_model self.progenitor_mass =", "E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E, flavor).value * phot, Enu, axis=0) E_per_V *= H2O_in_ice", "# Where the mean energy is not zero, return rate in units neutrinos", "pinch parameter alpha. True for # nearly all CCSN models. self.energy_pdf = lambda", "\\ gdtr(1., a + 1., (a + 1.) * (E / Ea)) def", "# m**2 flux = self.get_flux( time, flavor ) # s**-1 if mixing is", "given flavor. Parameters ---------- t : float Time relative to core bounce. flavor", "smc) else: raise ValueError('Unrecognized distance_model: {}'.format(dmtype)) return Source(config.source.name, config.source.model, parse_quantity(config.source.progenitor.mass), distance_model.distance(), time, luminosity,", "is assumed to be like a gamma function, # parameterized by mean energy", ".stellardist import FixedDistance, StellarDensity from .config import parse_quantity from astropy import units as", "luminosity at time t for a given flavor. Parameters ---------- t : float", "class Source: def __init__(self, name, spectral_model, progenitor_mass, progenitor_distance, time={}, luminosity={}, mean_energy={}, pinch={}): self.name", "initialized source model. 
\"\"\" # Dictionary of L, <E>, and alpha versus time,", "< 2: raise RuntimeError(\"Time array size <2, unable to compute energy per volume.\")", "a random number between 0 and 1 and compare to the CDF #", "Grid of the product of lepton cross section with lepton mean energy and", "t, flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching paramter alpha at time t for a given", "t, E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a random sample of neutrino energies at some", "between 0 and 1 and compare to the CDF # of the neutrino", "x time.size is created and can be very memory inefficient. Returns ------- E_per_V", "out=np.zeros(len(luminosity))) else: if mean_energy > 0.: flux = luminosity / mean_energy else: flux", "flavor).to(u.MeV).value if isinstance(t, (list, tuple, np.ndarray)): # It is non-physical to have a<0", "random number between 0 and 1 and compare to the CDF # of", "'ascii': # ASCII will be supported! Promise, promise. raise ValueError('Unsupported format: \"ASCII\"') else:", "len(i_part)*n, x.size ) ] # Ensure that last partition always has 2 or", "time t for a particular neutrino flavor. The energies are generated via inverse", "lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile, lmc, smc) else: raise", "Returns ------- E_per_V Energy per m**3 of ice deposited by neutrinos of requested", "# It is non-physical to have a<0 but some model files/interpolations still have", ") elif config.source.table.format.lower() == 'ascii': # ASCII will be supported! Promise, promise. raise", "from the energy spectrum. \"\"\" cdf = self.energy_cdf(flavor, t, E) energies = np.zeros(n,", "Energy CDF, useful for random energy sampling. self.energy_cdf = lambda a, Ea, E:", "import parse_quantity from astropy import units as u from astropy.table import Table from", "Neutrino flavor. 
E : `numpy.ndarray` Sorted grid of neutrino energies to compute the", "relative to core bounce (units seconds). flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns -------", ":class:`asteria.config.Configuration` Configuration parameters used to create a Source. Returns ------- Source An initialized", "* rate # # return flux def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the", "ice by photons from SN neutrino interactions. Parameters ---------- time : float (units", ": float Source pinching parameter (unitless). \"\"\" return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time, flavor=Flavor.NU_E_BAR):", "energies def photonic_energy_per_vol(self, time, E, flavor, photon_spectrum, mixing=None, n=1000): \"\"\"Compute the energy deposited", "(E / Ea)) def parts_by_index(self, x, n): \"\"\"Returns a list of size-n numpy", "`numpy.ndarray` (Units vary, m**2) Grid of the product of lepton cross section with", "to produce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- energies : `numpy.ndarray` Table", "sampling of the CDF of the neutrino energy distribution. Parameters ---------- t :", "FixedDistance model. r = parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r, dr) elif", "0.: Enu[0] = 1e-10 # u.MeV a = self.get_pinch_parameter(t, flavor) Ea = self.get_mean_energy(t,", "sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor]", "of requested flavor \"\"\" H2O_in_ice = 3.053e28 # 1 / u.m**3 t =", "get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching paramter alpha at time t for a", "i_part : list List of index partitions (partitions are numpy array). 
\"\"\" nParts", "= j[cut] en = E[j] + (E[j+1] - E[j]) / (cdf[j+1] - cdf[j])", "float Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. E :", "<n if x.size is not multiple of n if len(i_part)*n != x.size: i_part", "return self.time def get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity at time t for", "memory usage E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E, flavor).value * phot, Enu, axis=0) E_per_V", "Source pinching parameter (unitless). \"\"\" return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return source", "the mean energy is not zero, return rate in units neutrinos # per", "Given t, get current average energy and pinch parameter. # Use simple 1D", "len(E)-1] = E[-1].to('MeV').value cut = (0 < j) & (j < len(E)-1) j", "current average energy and pinch parameter. # Use simple 1D linear interpolation t", "'*(10-len(flavor.name))), end='') # The following two lines exploit the fact that astropy quantities", ") if there are remaining elements of x. Returns ------- i_part : list", "float (units s) Time relative to core bounce. E : `numpy.ndarray` Sorted grid", "* u.MeV / u.m**3 def initialize(config): \"\"\"Initialize a Source model from configuration parameters.", "= sn_data_table['TIME'].to('s') # Loop over all flavors in the table: for flavor in", "np.ndarray)): # It is non-physical to have a<0 but some model files/interpolations still", ": `numpy.ndarray` Sorted grid of neutrino energies to compute the energy PDF. Returns", "get current average energy and pinch parameter. # Use simple 1D linear interpolation", "2: raise RuntimeError(\"Time array size <2, unable to compute energy per volume.\") for", "t.size), dtype = float ) E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1))", "produce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. 
Returns ------- energies : `numpy.ndarray` Table of", "requested flavor \"\"\" H2O_in_ice = 3.053e28 # 1 / u.m**3 t = time.to(u.s).value", "RuntimeError(\"Time array size <2, unable to compute energy per volume.\") for i_part in", "else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False)", "a list of size-n numpy arrays containing indices for the elements of x,", "bounce (units seconds). flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- flux : float", "Sorted grid of neutrino energies to compute the energy PDF. n : int", ": list List of index partitions (partitions are numpy array). \"\"\" nParts =", "Source: def __init__(self, name, spectral_model, progenitor_mass, progenitor_distance, time={}, luminosity={}, mean_energy={}, pinch={}): self.name =", "0 and 1 and compare to the CDF # of the neutrino energy", "if Enu[0] == 0.: Enu[0] = 1e-10 # u.MeV a = self.get_pinch_parameter(t, flavor)", "core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- mean_energy : float Source", "\"\"\"Compute the PDF of the neutrino energy distribution at time t. Parameters ----------", "= time.to(u.s).value luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t, flavor).value if isinstance(t, (list,", "basic parameters of neutrino fluxes from supernovae as modeled in the CCSN literature.", "time steps to compute at once. A temporary numpy array of size n", "promise. raise ValueError('Unsupported format: \"ASCII\"') else: raise ValueError('Unknown format {}'.format(config.source.table.format)) # Set up", "over all flavors in the table: for flavor in Flavor: fl = flavor.name.upper()", "luminosity vs. 
time as well as the energy spectrum of the neutrinos at", "progenitor_mass, progenitor_distance, time={}, luminosity={}, mean_energy={}, pinch={}): self.name = name self.model = spectral_model self.progenitor_mass", "luminosity={}, mean_energy={}, pinch={}): self.name = name self.model = spectral_model self.progenitor_mass = progenitor_mass self.progenitor_distance", "particular neutrino flavor. The energies are generated via inverse transform sampling of the", "---------- t : float Time relative to core bounce. E : `numpy.ndarray` Sorted", "as modeled in the CCSN literature. For each species of neutrino one requires", "\"\"\"Return source pinching paramter alpha at time t for a given flavor. Parameters", "of the neutrino energy distribution. Parameters ---------- t : float Time relative to", "steps to compute at once. A temporary numpy array of size n x", "to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- luminosity : float", "indices for the elements of x, and one size-m array ( with m<n", "scipy.special import loggamma, gdtr from scipy.interpolate import PchipInterpolator class Source: def __init__(self, name,", "sampling. self.energy_cdf = lambda a, Ea, E: \\ gdtr(1., a + 1., (a", "Energy PDF function is assumed to be like a gamma function, # parameterized", "self.mean_energy = mean_energy self.pinch = pinch # Energy PDF function is assumed to", ": float Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. E", "StellarDensity(fitsfile, lmc, smc) else: raise ValueError('Unrecognized distance_model: {}'.format(dmtype)) return Source(config.source.name, config.source.model, parse_quantity(config.source.progenitor.mass), distance_model.distance(),", "flavor) else: nu_spectrum = mixing(self) print('Beginning {0} simulation... 
{1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='') #", "the fact that astropy quantities will # always return a number when numpy", "luminosity table and a pinching # parameter (alpha) and mean energy table. fitsfile", "np.ediff1d(t, to_end=(t[-1] - t[-2])) * rate # # return flux def energy_spectrum(self, time,", "Returns ------- flux : float Source number flux (unit-less, count of neutrinos). \"\"\"", "fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile,", "= sn_data_table['ALPHA_{:s}'.format(fl)] elif fl == 'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV') alpha", "Linearly interpolate in the CDF to produce a random energy energies[j <= 0]", "for # nearly all CCSN models. self.energy_pdf = lambda a, Ea, E: \\", "luminosity (units of power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s) def get_mean_energy(self,", "the CCSN literature. For each species of neutrino one requires an estimate of", "0) E_pdf = np.zeros( (Enu.size, t.size), dtype = float ) E_pdf[:, cut] =", ": :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- energies : `numpy.ndarray` Table of energies sampled", "a) * np.log(1 + a) - loggamma(1 + a) + a * np.log(E)", "* np.log(Ea) - (1 + a) * (E / Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf,", "nu_spectrum(time[i_part], E, flavor).value * phot, Enu, axis=0) E_per_V *= H2O_in_ice / ( 4", "!= x.size: i_part += [ np.arange( len(i_part)*n, x.size ) ] # Ensure that", "relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. E : `numpy.ndarray` Sorted", "array size <2, unable to compute energy per volume.\") for i_part in self.parts_by_index(time,", "energies : `numpy.ndarray` Table of energies sampled from the energy spectrum. 
\"\"\" cdf", "np.ediff1d(t, to_end=(t[-1] - t[-2])) if not flavor.is_electron: E_per_V *= 2 print('Completed') return E_per_V", "that last partition always has 2 or more elements if len(i_part[-1]) < 2:", "extrapolate=False) mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False) pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False ) elif", "neutrino energies to compute the energy PDF. n : int Number of energy", "'/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile, lmc, smc)", "core bounce. E : `numpy.ndarray` Sorted grid of neutrino energies to compute the", "self.parts_by_index(time, n): # Limits memory usage E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E, flavor).value *", "# Given t, get current average energy and pinch parameter. # Use simple", "> 0), out=np.zeros(len(luminosity))) else: if mean_energy > 0.: flux = luminosity / mean_energy", "E, extrapolate=False) pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False ) elif config.source.table.format.lower() == 'ascii': #", "inverse transform sampling of the CDF of the neutrino energy distribution. Parameters ----------", "unable to compute energy per volume.\") for i_part in self.parts_by_index(time, n): # Limits", "'FixedDistance': # FixedDistance model. r = parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r,", "\\ E=Enu.reshape(-1, 1)) return E_pdf else: if Ea <= 0.: return np.zeros_like(E) elif", "{} if config.source.table.format.lower() == 'fits': # Open FITS file, which contains a luminosity", "Source number flux (unit-less, count of neutrinos). 
\"\"\" t = time.to(u.s).value luminosity =", "= Table.read(fitsfile) time = sn_data_table['TIME'].to('s') # Loop over all flavors in the table:", ": float Time relative to core bounce. E : `numpy.ndarray` Sorted grid of", "is non-physical to have a<0 but some model files/interpolations still have this a[a<0]", "multiple of n if len(i_part)*n != x.size: i_part += [ np.arange( len(i_part)*n, x.size", ":class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- energies : `numpy.ndarray` Table of energies sampled from", "2: i_part[-2] = np.append(i_part[-2], i_part[-1]) i_part = i_part[0:-1] return i_part def get_time(self): \"\"\"Return", "(units seconds). flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- flux : float Source", "options to add LMC and SMC. fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC)", "i_part def get_time(self): \"\"\"Return source time as numpy array. Returns ------- time :", "dmtype = config.source.progenitor.distance.model if dmtype == 'FixedDistance': # FixedDistance model. r = parse_quantity(config.source.progenitor.distance.distance)", "zero, return rate in units neutrinos # per second, elsewhere, returns zero. #", "to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- mean_energy : float", "/ u.m**3 t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0: Enu[0]", "in self.parts_by_index(time, n): # Limits memory usage E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E, flavor).value", "(units of energy). \"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return", "some model files/interpolations still have this a[a<0] = 0 cut = (a >=", "by neutrinos of requested flavor \"\"\" H2O_in_ice = 3.053e28 # 1 / u.m**3", "float Time relative to core bounce (units seconds). 
flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor.", "mean_energy, pinch = {}, {}, {} if config.source.table.format.lower() == 'fits': # Open FITS", "flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- luminosity : float Source luminosity (units", "alpha = sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor] =", "loggamma, gdtr from scipy.interpolate import PchipInterpolator class Source: def __init__(self, name, spectral_model, progenitor_mass,", "get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity at time t for a given flavor.", "dr) elif dmtype == 'StellarDensity': # StellarDensity model, with options to add LMC", "the neutrino energy distribution at time t u = np.random.uniform(n) j = np.searchsorted(cdf,", "E[-1].to('MeV').value cut = (0 < j) & (j < len(E)-1) j = j[cut]", "numpy size is called on them, even if it is 1. E_per_V =", "np.zeros( time.size ) if time.size < 2: raise RuntimeError(\"Time array size <2, unable", "neutrino energy distribution at time t. Parameters ---------- t : float Time relative", "<= 0.: return np.zeros_like(E) elif a <= 0.: return self.energy_pdf(0, Ea, E.value).real else:", "= '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile, lmc,", "parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r, dr) elif dmtype == 'StellarDensity': # StellarDensity model, with", "of size n x time.size is created and can be very memory inefficient.", "Ensure that last partition always has 2 or more elements if len(i_part[-1]) <", "bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. 
Returns ------- mean_energy : float Source mean", ": :class:`asteria.neutrino.Flavor` Neutrino flavor. photon_spectrum : `numpy.ndarray` (Units vary, m**2) Grid of the", "= E.to(u.MeV).value if Enu[0] == 0: Enu[0] = 1e-10 * u.MeV phot =", "(1 + a) * np.log(Ea) - (1 + a) * (E / Ea))", "+ a) + a * np.log(E) - \\ (1 + a) * np.log(Ea)", "to create a Source. Returns ------- Source An initialized source model. \"\"\" #", "' '*(10-len(flavor.name))), end='') # The following two lines exploit the fact that astropy", "of energy samples to produce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- energies", "range(nParts) ] # Generate final partition of size <n if x.size is not", "Flavor from .stellardist import FixedDistance, StellarDensity from .config import parse_quantity from astropy import", "# always return a number when numpy size is called on them, even", "= config.source.progenitor.distance.model if dmtype == 'FixedDistance': # FixedDistance model. r = parse_quantity(config.source.progenitor.distance.distance) dr", "the energy PDF. n : int Number of energy samples to produce. flavor", "(list, tuple, np.ndarray)): # It is non-physical to have a<0 but some model", "the neutrino energy distribution. Parameters ---------- t : float Time relative to core", "of size-n numpy arrays containing indices for the elements of x, and one", "random energy sampling. self.energy_cdf = lambda a, Ea, E: \\ gdtr(1., a +", "PchipInterpolator(time, E, extrapolate=False) pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False ) elif config.source.table.format.lower() == 'ascii':", "values computed as a function of energy. \"\"\" # Given t, get current", "relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- pinch :", "t for a particular neutrino flavor. 
The energies are generated via inverse transform", "if Enu[0] == 0: Enu[0] = 1e-10 * u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) #", "astropy quantities will # always return a number when numpy size is called", "remaining elements of x. Returns ------- i_part : list List of index partitions", "Time relative to core bounce (units seconds). flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns", "(Ea > 0) E_pdf = np.zeros( (Enu.size, t.size), dtype = float ) E_pdf[:,", "profile (units of s). \"\"\" return self.time def get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source", "np.arange( len(i_part)*n, x.size ) ] # Ensure that last partition always has 2", "supported! Promise, promise. raise ValueError('Unsupported format: \"ASCII\"') else: raise ValueError('Unknown format {}'.format(config.source.table.format)) #", "flux = np.divide(luminosity, mean_energy, where=(mean_energy > 0), out=np.zeros(len(luminosity))) else: if mean_energy > 0.:", "source flux at time t for a given flavor. Parameters ---------- t :", "if len(i_part)*n != x.size: i_part += [ np.arange( len(i_part)*n, x.size ) ] #", "= photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist = self.progenitor_distance.to(u.m).value # m**2 flux = self.get_flux( time,", "flavor. The energies are generated via inverse transform sampling of the CDF of", "and mean energy table. fitsfile = '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table = Table.read(fitsfile) time =", "cut = (a < 0) & (Ea > 0) E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1,", "1.) * (E / Ea)) def parts_by_index(self, x, n): \"\"\"Returns a list of", "samples to produce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- energies : `numpy.ndarray`", "n : int Number of energy samples to produce. flavor : :class:`asteria.neutrino.Flavor` Neutrino", "(unitless). 
\"\"\" return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return source flux at time", "'fits': # Open FITS file, which contains a luminosity table and a pinching", "pinch # Energy PDF function is assumed to be like a gamma function,", "for flavor in Flavor: fl = flavor.name.upper() if any( fl in col for", "neutrino interactions. Parameters ---------- time : float (units s) Time relative to core", "x, n): \"\"\"Returns a list of size-n numpy arrays containing indices for the", "0] = E[0].to('MeV').value energies[j >= len(E)-1] = E[-1].to('MeV').value cut = (0 < j)", "photonic_energy_per_vol(self, time, E, flavor, photon_spectrum, mixing=None, n=1000): \"\"\"Compute the energy deposited in a", "- E[j]) / (cdf[j+1] - cdf[j]) * (u[cut] - cdf[j]) energies[cut] = en", "and SMC. fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model", "<= 0] = E[0].to('MeV').value energies[j >= len(E)-1] = E[-1].to('MeV').value cut = (0 <", "= np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) # Energy CDF, useful for random energy sampling.", "Enu[0] == 0.: Enu[0] = 1e-10 # u.MeV a = self.get_pinch_parameter(t, flavor) Ea", "return E_pdf else: if Ea <= 0.: return np.zeros_like(E) elif a <= 0.:", "grid of neutrino energies to compute the energy PDF. n : int Number", "It is non-physical to have a<0 but some model files/interpolations still have this", "Source time profile (units of s). \"\"\" return self.time def get_luminosity(self, t, flavor=Flavor.NU_E_BAR):", "self.time = time self.luminosity = luminosity self.mean_energy = mean_energy self.pinch = pinch #", "E, flavor): return self.energy_spectrum(t, E, flavor) * self.get_flux(t, flavor) else: nu_spectrum = mixing(self)", "are numpy array). 
\"\"\" nParts = x.size//n i_part = [ np.arange( i*n, (i+1)*n", "used to create a Source. Returns ------- Source An initialized source model. \"\"\"", "t u = np.random.uniform(n) j = np.searchsorted(cdf, u) # Linearly interpolate in the", "L = sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor]", "np.trapz( nu_spectrum(time[i_part], E, flavor).value * phot, Enu, axis=0) E_per_V *= H2O_in_ice / (", "pinching paramter alpha at time t for a given flavor. Parameters ---------- t", "spectrum. \"\"\" cdf = self.energy_cdf(flavor, t, E) energies = np.zeros(n, dtype=float) # Generate", "dmtype == 'FixedDistance': # FixedDistance model. r = parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model", "flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching paramter alpha at time t for a given flavor.", "# 1 / u.m**3 t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] ==", "Neutrino flavor. Returns ------- energies : `numpy.ndarray` Table of energies sampled from the", "<= 0.: return self.energy_pdf(0, Ea, E.value).real else: return self.energy_pdf(a, Ea, E.value).real def sample_energies(self,", "if isinstance(t, (list, tuple, np.ndarray)): flux = np.divide(luminosity, mean_energy, where=(mean_energy > 0), out=np.zeros(len(luminosity)))", "i in range(nParts) ] # Generate final partition of size <n if x.size", "(units of s). \"\"\" return self.time def get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity", "at some time t for a particular neutrino flavor. 
The energies are generated", "from .config import parse_quantity from astropy import units as u from astropy.table import", "def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF of the neutrino energy distribution", "lines exploit the fact that astropy quantities will # always return a number", "mean energy and pinch parameter alpha. True for # nearly all CCSN models.", "= '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table = Table.read(fitsfile) time = sn_data_table['TIME'].to('s') # Loop over all", "neutrinos of requested flavor \"\"\" H2O_in_ice = 3.053e28 # 1 / u.m**3 t", "flux = luminosity / mean_energy else: flux = 0 return flux / u.s", "of x, and one size-m array ( with m<n ) if there are", "of neutrino energies flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. photon_spectrum : `numpy.ndarray` (Units vary,", "* (E / Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) # Energy CDF,", "self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) # Energy CDF, useful for random energy", "---------- config : :class:`asteria.config.Configuration` Configuration parameters used to create a Source. Returns -------", ">= len(E)-1] = E[-1].to('MeV').value cut = (0 < j) & (j < len(E)-1)", "i_part[-1]) i_part = i_part[0:-1] return i_part def get_time(self): \"\"\"Return source time as numpy", "self.energy_pdf = lambda a, Ea, E: \\ np.exp((1 + a) * np.log(1 +", "deposited in a cubic meter of ice by photons from SN neutrino interactions.", "create a Source. Returns ------- Source An initialized source model. 
\"\"\" # Dictionary", "else: raise ValueError('Unrecognized distance_model: {}'.format(dmtype)) return Source(config.source.name, config.source.model, parse_quantity(config.source.progenitor.mass), distance_model.distance(), time, luminosity, mean_energy,", "* (E / Ea)) def parts_by_index(self, x, n): \"\"\"Returns a list of size-n", "mixing=None, n=1000): \"\"\"Compute the energy deposited in a cubic meter of ice by", "------- i_part : list List of index partitions (partitions are numpy array). \"\"\"", "CDF, useful for random energy sampling. self.energy_cdf = lambda a, Ea, E: \\", "to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. E : `numpy.ndarray` Sorted grid", "# Limits memory usage E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E, flavor).value * phot, Enu,", "of energy. \"\"\" # Given t, get current average energy and pinch parameter.", "as numpy array. Returns ------- time : float Source time profile (units of", "/ u.m**3 def initialize(config): \"\"\"Initialize a Source model from configuration parameters. Parameters ----------", "spectral_model, progenitor_mass, progenitor_distance, time={}, luminosity={}, mean_energy={}, pinch={}): self.name = name self.model = spectral_model", "E.value).real else: return self.energy_pdf(a, Ea, E.value).real def sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate", "a) * (E / Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) # Energy", "1., (a + 1.) * (E / Ea)) def parts_by_index(self, x, n): \"\"\"Returns", "to_end=(t[-1] - t[-2])) if not flavor.is_electron: E_per_V *= 2 print('Completed') return E_per_V *", "spectrum of the neutrinos at any given time. \"\"\" from __future__ import print_function,", "size is called on them, even if it is 1. 
E_per_V = np.zeros(", "is not multiple of n if len(i_part)*n != x.size: i_part += [ np.arange(", "a) - loggamma(1 + a) + a * np.log(E) - \\ (1 +", "(list, tuple, np.ndarray)): flux = np.divide(luminosity, mean_energy, where=(mean_energy > 0), out=np.zeros(len(luminosity))) else: if", "MeV, sorted according to parameter E n : int Maximum number of time", "# m**2 dist = self.progenitor_distance.to(u.m).value # m**2 flux = self.get_flux( time, flavor )", "of energy). \"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source", "energy distribution at time t. Parameters ---------- t : float Time relative to", "* u.MeV def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching paramter alpha at time", "3.053e28 # 1 / u.m**3 t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0]", "== 'ascii': # ASCII will be supported! Promise, promise. raise ValueError('Unsupported format: \"ASCII\"')", "time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0.: Enu[0] = 1e-10 # u.MeV", "PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False) pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False", "to compute energy per volume.\") for i_part in self.parts_by_index(time, n): # Limits memory", "print('Completed') return E_per_V * u.MeV / u.m**3 def initialize(config): \"\"\"Initialize a Source model", "a cubic meter of ice by photons from SN neutrino interactions. Parameters ----------", "E.to(u.MeV).value if Enu[0] == 0: Enu[0] = 1e-10 * u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1))", "An initialized source model. \"\"\" # Dictionary of L, <E>, and alpha versus", "model. 
\"\"\" # Dictionary of L, <E>, and alpha versus time, keyed by", "mean energy and lepton path length per MeV, sorted according to parameter E", "be like a gamma function, # parameterized by mean energy and pinch parameter", "* np.pi * dist**2) * np.ediff1d(t, to_end=(t[-1] - t[-2])) if not flavor.is_electron: E_per_V", "as the energy spectrum of the neutrinos at any given time. \"\"\" from", "energies[j <= 0] = E[0].to('MeV').value energies[j >= len(E)-1] = E[-1].to('MeV').value cut = (0", "= float ) E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut =", "mean_energy = self.get_mean_energy(t, flavor).value if isinstance(t, (list, tuple, np.ndarray)): flux = np.divide(luminosity, mean_energy,", "time as numpy array. Returns ------- time : float Source time profile (units", "loggamma(1 + a) + a * np.log(E) - \\ (1 + a) *", "# StellarDensity model, with options to add LMC and SMC. fitsfile = '/'.join([config.abs_base_path,", ": `numpy.ndarray` (Units vary, m**2) Grid of the product of lepton cross section", "the energy PDF. Returns ------- spectrum : `numpy.ndarray` Table of PDF values computed", "a luminosity table and a pinching # parameter (alpha) and mean energy table.", "astropy.table import Table from abc import ABC, abstractmethod import numpy as np from", "to have a<0 but some model files/interpolations still have this a[a<0] = 0", "\"\"\" nParts = x.size//n i_part = [ np.arange( i*n, (i+1)*n ) for i", "<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\"CCSN neutrino sources. This module encapsulates the", "float Source mean energy (units of energy). 
\"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def", "and lepton path length per MeV, sorted according to parameter E n :", "E : `numpy.ndarray` Sorted grid of neutrino energies to compute the energy PDF.", "(Ea > 0) E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1, 1))", "a random sample of neutrino energies at some time t for a particular", "for a given flavor. Parameters ---------- t : float Time relative to core", "i_part = [ np.arange( i*n, (i+1)*n ) for i in range(nParts) ] #", "+= [ np.arange( len(i_part)*n, x.size ) ] # Ensure that last partition always", "H2O_in_ice = 3.053e28 # 1 / u.m**3 t = time.to(u.s).value Enu = E.to(u.MeV).value", "np from scipy.special import loggamma, gdtr from scipy.interpolate import PchipInterpolator class Source: def", "nParts = x.size//n i_part = [ np.arange( i*n, (i+1)*n ) for i in", "s). \"\"\" return self.time def get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity at time", "the PDF of the neutrino energy distribution at time t. Parameters ---------- t", "can be very memory inefficient. Returns ------- E_per_V Energy per m**3 of ice", "u from astropy.table import Table from abc import ABC, abstractmethod import numpy as", "simple 1D linear interpolation t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] ==", "parts_by_index(self, x, n): \"\"\"Returns a list of size-n numpy arrays containing indices for", "# Dictionary of L, <E>, and alpha versus time, keyed by neutrino flavor.", "flavor.name.upper() if any( fl in col for col in sn_data_table.keys() ): L =", "i_part[-2] = np.append(i_part[-2], i_part[-1]) i_part = i_part[0:-1] return i_part def get_time(self): \"\"\"Return source", "return rate in units neutrinos # per second, elsewhere, returns zero. # flux", "add LMC and SMC. 
fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc =", "second, elsewhere, returns zero. # flux = np.ediff1d(t, to_end=(t[-1] - t[-2])) * rate", "raise ValueError('Unknown format {}'.format(config.source.table.format)) # Set up the distance model. distance_model = None", "= np.divide(luminosity, mean_energy, where=(mean_energy > 0), out=np.zeros(len(luminosity))) else: if mean_energy > 0.: flux", "Open FITS file, which contains a luminosity table and a pinching # parameter", "array). \"\"\" nParts = x.size//n i_part = [ np.arange( i*n, (i+1)*n ) for", "Returns ------- energies : `numpy.ndarray` Table of energies sampled from the energy spectrum.", "Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. E : `numpy.ndarray`", "time as well as the energy spectrum of the neutrinos at any given", "* np.ediff1d(t, to_end=(t[-1] - t[-2])) if not flavor.is_electron: E_per_V *= 2 print('Completed') return", "where=(mean_energy > 0), out=np.zeros(len(luminosity))) else: if mean_energy > 0.: flux = luminosity /", "nu_spectrum = mixing(self) print('Beginning {0} simulation... {1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='') # The following", "i*n, (i+1)*n ) for i in range(nParts) ] # Generate final partition of", "per volume.\") for i_part in self.parts_by_index(time, n): # Limits memory usage E_per_V[i_part] +=", "energy per volume.\") for i_part in self.parts_by_index(time, n): # Limits memory usage E_per_V[i_part]", "/ (cdf[j+1] - cdf[j]) * (u[cut] - cdf[j]) energies[cut] = en return energies", "photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist = self.progenitor_distance.to(u.m).value # m**2 flux = self.get_flux( time, flavor", "more elements if len(i_part[-1]) < 2: i_part[-2] = np.append(i_part[-2], i_part[-1]) i_part = i_part[0:-1]", "flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. 
E : `numpy.ndarray` Sorted grid of neutrino energies", "be supported! Promise, promise. raise ValueError('Unsupported format: \"ASCII\"') else: raise ValueError('Unknown format {}'.format(config.source.table.format))", "+ a) - loggamma(1 + a) + a * np.log(E) - \\ (1", "u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist = self.progenitor_distance.to(u.m).value # m**2 flux =", "raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False) pinch[flavor]", "Sorted grid of neutrino energies flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. photon_spectrum : `numpy.ndarray`", ":class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- luminosity : float Source luminosity (units of power).", ") E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut = (a <", "self.progenitor_distance.to(u.m).value # m**2 flux = self.get_flux( time, flavor ) # s**-1 if mixing", "with options to add LMC and SMC. fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc =", "\"ASCII\"') else: raise ValueError('Unknown format {}'.format(config.source.table.format)) # Set up the distance model. distance_model", "flux : float Source number flux (unit-less, count of neutrinos). \"\"\" t =", "relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- mean_energy :", "== 0: Enu[0] = 1e-10 * u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist", "= np.zeros( time.size ) if time.size < 2: raise RuntimeError(\"Time array size <2,", "random sample of neutrino energies at some time t for a particular neutrino", "as u from astropy.table import Table from abc import ABC, abstractmethod import numpy", "from SN neutrino interactions. 
Parameters ---------- time : float (units s) Time relative", "modeled in the CCSN literature. For each species of neutrino one requires an", "self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut = (a < 0) & (Ea >", "Flavor: fl = flavor.name.upper() if any( fl in col for col in sn_data_table.keys()", "Configuration parameters used to create a Source. Returns ------- Source An initialized source", "self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t, flavor).value if isinstance(t, (list, tuple, np.ndarray)): flux =", "flux = np.ediff1d(t, to_end=(t[-1] - t[-2])) * rate # # return flux def", "= time self.luminosity = luminosity self.mean_energy = mean_energy self.pinch = pinch # Energy", "0) & (Ea > 0) E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \\", "Enu[0] == 0: Enu[0] = 1e-10 * u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2", "section with lepton mean energy and lepton path length per MeV, sorted according", "Neutrino flavor. 
Returns ------- flux : float Source number flux (unit-less, count of", "from snewpy.neutrino import Flavor from .stellardist import FixedDistance, StellarDensity from .config import parse_quantity", "a) * np.log(Ea) - (1 + a) * (E / Ea)) self.v_energy_pdf =", "a <= 0.: return self.energy_pdf(0, Ea, E.value).real else: return self.energy_pdf(a, Ea, E.value).real def", "if config.source.table.format.lower() == 'fits': # Open FITS file, which contains a luminosity table", "time self.luminosity = luminosity self.mean_energy = mean_energy self.pinch = pinch # Energy PDF", "self.get_pinch_parameter(t, flavor) Ea = self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t, (list, tuple, np.ndarray)): # It", "u.s # Where the mean energy is not zero, return rate in units", "else: flux = 0 return flux / u.s # Where the mean energy", "encapsulates the basic parameters of neutrino fluxes from supernovae as modeled in the", "Source mean energy (units of energy). \"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def get_pinch_parameter(self,", "flavor) * self.get_flux(t, flavor) else: nu_spectrum = mixing(self) print('Beginning {0} simulation... {1}'.format(flavor.name, '", "energy is not zero, return rate in units neutrinos # per second, elsewhere,", "n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a random sample of neutrino energies at some time t", "random energy energies[j <= 0] = E[0].to('MeV').value energies[j >= len(E)-1] = E[-1].to('MeV').value cut", "in col for col in sn_data_table.keys() ): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV')", "neutrino fluxes from supernovae as modeled in the CCSN literature. 
For each species", "list of size-n numpy arrays containing indices for the elements of x, and", "rate # # return flux def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF", "flavors in the table: for flavor in Flavor: fl = flavor.name.upper() if any(", "`numpy.ndarray` Sorted grid of neutrino energies flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. photon_spectrum :", "core bounce (units seconds). flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- flux :", "(a < 0) & (Ea > 0) E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1,", "Enu[0] = 1e-10 * u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist = self.progenitor_distance.to(u.m).value", "t[-2])) if not flavor.is_electron: E_per_V *= 2 print('Completed') return E_per_V * u.MeV /", "simulation... {1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='') # The following two lines exploit the fact", "Enu = E.to(u.MeV).value if Enu[0] == 0.: Enu[0] = 1e-10 # u.MeV a", "E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1, 1)) return E_pdf else:", "versus time, keyed by neutrino flavor. luminosity, mean_energy, pinch = {}, {}, {}", "u.MeV def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching paramter alpha at time t", "t : float Time relative to core bounce (units seconds). flavor : :class:`asteria.neutrino.Flavor`", "import ABC, abstractmethod import numpy as np from scipy.special import loggamma, gdtr from", "(u[cut] - cdf[j]) energies[cut] = en return energies def photonic_energy_per_vol(self, time, E, flavor,", "\\ E=Enu.reshape(-1,1)) cut = (a < 0) & (Ea > 0) E_pdf[:, cut]", "0: Enu[0] = 1e-10 * u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist =", "flavor. 
Returns ------- flux : float Source number flux (unit-less, count of neutrinos).", "(u.erg / u.s) def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source mean energy at time", "ABC, abstractmethod import numpy as np from scipy.special import loggamma, gdtr from scipy.interpolate", "neutrinos at any given time. \"\"\" from __future__ import print_function, division from snewpy.neutrino", "any given time. \"\"\" from __future__ import print_function, division from snewpy.neutrino import Flavor", "= i_part[0:-1] return i_part def get_time(self): \"\"\"Return source time as numpy array. Returns", "time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF of the neutrino energy distribution at time", "neutrino energy distribution at time t u = np.random.uniform(n) j = np.searchsorted(cdf, u)", "# of the neutrino energy distribution at time t u = np.random.uniform(n) j", "time, keyed by neutrino flavor. luminosity, mean_energy, pinch = {}, {}, {} if", "m**2 flux = self.get_flux( time, flavor ) # s**-1 if mixing is None:", "according to parameter E n : int Maximum number of time steps to", "/ Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) # Energy CDF, useful for", "files/interpolations still have this a[a<0] = 0 cut = (a >= 0) &", "0), out=np.zeros(len(luminosity))) else: if mean_energy > 0.: flux = luminosity / mean_energy else:", "cut = (0 < j) & (j < len(E)-1) j = j[cut] en", "import Table from abc import ABC, abstractmethod import numpy as np from scipy.special", "initialize(config): \"\"\"Initialize a Source model from configuration parameters. Parameters ---------- config : :class:`asteria.config.Configuration`", "model, with options to add LMC and SMC. 
fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc", "dtype=float) # Generate a random number between 0 and 1 and compare to", "for the elements of x, and one size-m array ( with m<n )", "`numpy.ndarray` Sorted grid of neutrino energies to compute the energy PDF. Returns -------", "for a particular neutrino flavor. The energies are generated via inverse transform sampling", "energy distribution. Parameters ---------- t : float Time relative to core bounce. E", "sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif fl == 'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s') E =", "extrapolate=False) pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False ) elif config.source.table.format.lower() == 'ascii': # ASCII", "alpha. True for # nearly all CCSN models. self.energy_pdf = lambda a, Ea,", "---------- t : float Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino", "from scipy.special import loggamma, gdtr from scipy.interpolate import PchipInterpolator class Source: def __init__(self,", "flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t, flavor).value if isinstance(t, (list, tuple, np.ndarray)): flux = np.divide(luminosity,", "function of energy. \"\"\" # Given t, get current average energy and pinch", "config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile, lmc, smc) else:", "- t[-2])) if not flavor.is_electron: E_per_V *= 2 print('Completed') return E_per_V * u.MeV", "Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- luminosity", ": :class:`asteria.config.Configuration` Configuration parameters used to create a Source. 
Returns ------- Source An", "= self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1, 1)) return E_pdf else: if Ea", "ValueError('Unsupported format: \"ASCII\"') else: raise ValueError('Unknown format {}'.format(config.source.table.format)) # Set up the distance", "luminosity / mean_energy else: flux = 0 return flux / u.s # Where", "elements if len(i_part[-1]) < 2: i_part[-2] = np.append(i_part[-2], i_part[-1]) i_part = i_part[0:-1] return", "self.progenitor_distance = progenitor_distance self.time = time self.luminosity = luminosity self.mean_energy = mean_energy self.pinch", "to be like a gamma function, # parameterized by mean energy and pinch", "core bounce. E : `numpy.ndarray` Sorted grid of neutrino energies flavor : :class:`asteria.neutrino.Flavor`", "ValueError('Unknown format {}'.format(config.source.table.format)) # Set up the distance model. distance_model = None dmtype", "flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- energies : `numpy.ndarray` Table of energies", "= E[0].to('MeV').value energies[j >= len(E)-1] = E[-1].to('MeV').value cut = (0 < j) &", "flavor ) # s**-1 if mixing is None: def nu_spectrum(t, E, flavor): return", "(partitions are numpy array). \"\"\" nParts = x.size//n i_part = [ np.arange( i*n,", "a gamma function, # parameterized by mean energy and pinch parameter alpha. True", "source time as numpy array. Returns ------- time : float Source time profile", "partitions (partitions are numpy array). \"\"\" nParts = x.size//n i_part = [ np.arange(", "\\ np.exp((1 + a) * np.log(1 + a) - loggamma(1 + a) +", "= lambda a, Ea, E: \\ np.exp((1 + a) * np.log(1 + a)", "j) & (j < len(E)-1) j = j[cut] en = E[j] + (E[j+1]", "source model. \"\"\" # Dictionary of L, <E>, and alpha versus time, keyed", "at time t for a given flavor. Parameters ---------- t : float Time", "grid of neutrino energies to compute the energy PDF. 
Returns ------- spectrum :", "* (u.erg / u.s) def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source mean energy at", "u.MeV / u.m**3 def initialize(config): \"\"\"Initialize a Source model from configuration parameters. Parameters", "the table: for flavor in Flavor: fl = flavor.name.upper() if any( fl in", "(a >= 0) & (Ea > 0) E_pdf = np.zeros( (Enu.size, t.size), dtype", "# Set up the distance model. distance_model = None dmtype = config.source.progenitor.distance.model if", "flavor. Returns ------- energies : `numpy.ndarray` Table of energies sampled from the energy", "have a<0 but some model files/interpolations still have this a[a<0] = 0 cut", ") # s**-1 if mixing is None: def nu_spectrum(t, E, flavor): return self.energy_spectrum(t,", "of the neutrino energy distribution at time t. Parameters ---------- t : float", "flavor): return self.energy_spectrum(t, E, flavor) * self.get_flux(t, flavor) else: nu_spectrum = mixing(self) print('Beginning", "parameter (unitless). \"\"\" return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return source flux at", "flavor. Returns ------- luminosity : float Source luminosity (units of power). \"\"\" return", "if mixing is None: def nu_spectrum(t, E, flavor): return self.energy_spectrum(t, E, flavor) *", "phot, Enu, axis=0) E_per_V *= H2O_in_ice / ( 4 * np.pi * dist**2)", "mean_energy > 0.: flux = luminosity / mean_energy else: flux = 0 return", "not flavor.is_electron: E_per_V *= 2 print('Completed') return E_per_V * u.MeV / u.m**3 def", "sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a random sample of neutrino energies at", "\"\"\" return self.time def get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity at time t", "volume.\") for i_part in self.parts_by_index(time, n): # Limits memory usage E_per_V[i_part] += np.trapz(", "is created and can be very memory inefficient. 
Returns ------- E_per_V Energy per", "flavor=Flavor.NU_E_BAR): \"\"\"Generate a random sample of neutrino energies at some time t for", "and compare to the CDF # of the neutrino energy distribution at time", "pinch={}): self.name = name self.model = spectral_model self.progenitor_mass = progenitor_mass self.progenitor_distance = progenitor_distance", "assumed to be like a gamma function, # parameterized by mean energy and", "\"\"\"Return source flux at time t for a given flavor. Parameters ---------- t", "parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile, lmc, smc) else: raise ValueError('Unrecognized distance_model: {}'.format(dmtype)) return Source(config.source.name,", "# FixedDistance model. r = parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r, dr)", ": float Source mean energy (units of energy). \"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV", "a number when numpy size is called on them, even if it is", "Ea <= 0.: return np.zeros_like(E) elif a <= 0.: return self.energy_pdf(0, Ea, E.value).real", "Table of energies sampled from the energy spectrum. \"\"\" cdf = self.energy_cdf(flavor, t,", "alpha at time t for a given flavor. Parameters ---------- t : float", "neutrinos). \"\"\" t = time.to(u.s).value luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t, flavor).value", "length per MeV, sorted according to parameter E n : int Maximum number", "a Source model from configuration parameters. 
Parameters ---------- config : :class:`asteria.config.Configuration` Configuration parameters", "a random energy energies[j <= 0] = E[0].to('MeV').value energies[j >= len(E)-1] = E[-1].to('MeV').value", "== 'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X'] else: raise", "flavor) Ea = self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t, (list, tuple, np.ndarray)): # It is", "numpy arrays containing indices for the elements of x, and one size-m array", "*= H2O_in_ice / ( 4 * np.pi * dist**2) * np.ediff1d(t, to_end=(t[-1] -", "fl == 'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X'] else:", "elsewhere, returns zero. # flux = np.ediff1d(t, to_end=(t[-1] - t[-2])) * rate #", "time. \"\"\" from __future__ import print_function, division from snewpy.neutrino import Flavor from .stellardist", "= FixedDistance(r, dr) elif dmtype == 'StellarDensity': # StellarDensity model, with options to", "StellarDensity from .config import parse_quantity from astropy import units as u from astropy.table", "usage E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E, flavor).value * phot, Enu, axis=0) E_per_V *=", "number flux (unit-less, count of neutrinos). \"\"\" t = time.to(u.s).value luminosity = self.get_luminosity(t,", "def initialize(config): \"\"\"Initialize a Source model from configuration parameters. Parameters ---------- config :", "by mean energy and pinch parameter alpha. True for # nearly all CCSN", "the distance model. distance_model = None dmtype = config.source.progenitor.distance.model if dmtype == 'FixedDistance':", ": `numpy.ndarray` Sorted grid of neutrino energies to compute the energy PDF. n", "energies = np.zeros(n, dtype=float) # Generate a random number between 0 and 1", "energy table. 
fitsfile = '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table = Table.read(fitsfile) time = sn_data_table['TIME'].to('s') #", "self.energy_pdf(0, Ea, E.value).real else: return self.energy_pdf(a, Ea, E.value).real def sample_energies(self, t, E, n=1,", "------- Source An initialized source model. \"\"\" # Dictionary of L, <E>, and", "-1), Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1, 1)) return E_pdf else: if Ea <= 0.:", "PchipInterpolator(time, alpha, extrapolate=False ) elif config.source.table.format.lower() == 'ascii': # ASCII will be supported!", "from scipy.interpolate import PchipInterpolator class Source: def __init__(self, name, spectral_model, progenitor_mass, progenitor_distance, time={},", "pinch parameter. # Use simple 1D linear interpolation t = time.to(u.s).value Enu =", "SN neutrino interactions. Parameters ---------- time : float (units s) Time relative to", "time.size is created and can be very memory inefficient. Returns ------- E_per_V Energy", "= pinch # Energy PDF function is assumed to be like a gamma", ":class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- mean_energy : float Source mean energy (units of", "(alpha) and mean energy table. fitsfile = '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table = Table.read(fitsfile) time", "fitsfile = '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table = Table.read(fitsfile) time = sn_data_table['TIME'].to('s') # Loop over", "= sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif fl == 'NU_X_BAR': L", "two lines exploit the fact that astropy quantities will # always return a", ":class:`asteria.neutrino.Flavor` Neutrino flavor. 
Returns ------- flux : float Source number flux (unit-less, count", "None: def nu_spectrum(t, E, flavor): return self.energy_spectrum(t, E, flavor) * self.get_flux(t, flavor) else:", "if time.size < 2: raise RuntimeError(\"Time array size <2, unable to compute energy", "= {}, {}, {} if config.source.table.format.lower() == 'fits': # Open FITS file, which", "/ Ea)) def parts_by_index(self, x, n): \"\"\"Returns a list of size-n numpy arrays", "bounce. E : `numpy.ndarray` Sorted grid of neutrino energies to compute the energy", "\"\"\"CCSN neutrino sources. This module encapsulates the basic parameters of neutrino fluxes from", "signature='(1,n),(1,n)->(m,n)' ) # Energy CDF, useful for random energy sampling. self.energy_cdf = lambda", "= self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t, flavor).value if isinstance(t, (list, tuple, np.ndarray)): flux", "sn_data_table = Table.read(fitsfile) time = sn_data_table['TIME'].to('s') # Loop over all flavors in the", "self.model = spectral_model self.progenitor_mass = progenitor_mass self.progenitor_distance = progenitor_distance self.time = time self.luminosity", "to core bounce. E : `numpy.ndarray` Sorted grid of neutrino energies flavor :", "= PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False) pinch[flavor] = PchipInterpolator(time, alpha,", "inefficient. Returns ------- E_per_V Energy per m**3 of ice deposited by neutrinos of", "once. A temporary numpy array of size n x time.size is created and", "final partition of size <n if x.size is not multiple of n if", "non-physical to have a<0 but some model files/interpolations still have this a[a<0] =", "of PDF values computed as a function of energy. 
\"\"\" # Given t,", "# u.MeV a = self.get_pinch_parameter(t, flavor) Ea = self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t, (list,", "photon_spectrum : `numpy.ndarray` (Units vary, m**2) Grid of the product of lepton cross", "distance model. distance_model = None dmtype = config.source.progenitor.distance.model if dmtype == 'FixedDistance': #", "= np.zeros(n, dtype=float) # Generate a random number between 0 and 1 and", "has 2 or more elements if len(i_part[-1]) < 2: i_part[-2] = np.append(i_part[-2], i_part[-1])", "last partition always has 2 or more elements if len(i_part[-1]) < 2: i_part[-2]", "Neutrino flavor. photon_spectrum : `numpy.ndarray` (Units vary, m**2) Grid of the product of", "mean energy is not zero, return rate in units neutrinos # per second,", "flavor. Returns ------- mean_energy : float Source mean energy (units of energy). \"\"\"", "np.append(i_part[-2], i_part[-1]) i_part = i_part[0:-1] return i_part def get_time(self): \"\"\"Return source time as", "E : `numpy.ndarray` Sorted grid of neutrino energies flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor.", "memory inefficient. Returns ------- E_per_V Energy per m**3 of ice deposited by neutrinos", "time.size < 2: raise RuntimeError(\"Time array size <2, unable to compute energy per", "energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF of the neutrino energy distribution at", "CCSN literature. For each species of neutrino one requires an estimate of the", "- t[-2])) * rate # # return flux def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR):", "Table from abc import ABC, abstractmethod import numpy as np from scipy.special import", "model. distance_model = None dmtype = config.source.progenitor.distance.model if dmtype == 'FixedDistance': # FixedDistance", "literature. 
For each species of neutrino one requires an estimate of the luminosity", "return self.energy_spectrum(t, E, flavor) * self.get_flux(t, flavor) else: nu_spectrum = mixing(self) print('Beginning {0}", "Source An initialized source model. \"\"\" # Dictionary of L, <E>, and alpha", "( 4 * np.pi * dist**2) * np.ediff1d(t, to_end=(t[-1] - t[-2])) if not", ": int Maximum number of time steps to compute at once. A temporary", "to core bounce (units seconds). flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- flux", "which contains a luminosity table and a pinching # parameter (alpha) and mean", "E[j] + (E[j+1] - E[j]) / (cdf[j+1] - cdf[j]) * (u[cut] - cdf[j])", "distance_model = FixedDistance(r, dr) elif dmtype == 'StellarDensity': # StellarDensity model, with options", "of ice deposited by neutrinos of requested flavor \"\"\" H2O_in_ice = 3.053e28 #", "sample of neutrino energies at some time t for a particular neutrino flavor.", "a, Ea, E: \\ gdtr(1., a + 1., (a + 1.) * (E", "n): # Limits memory usage E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E, flavor).value * phot,", "size n x time.size is created and can be very memory inefficient. Returns", "------- flux : float Source number flux (unit-less, count of neutrinos). \"\"\" t", "parse_quantity from astropy import units as u from astropy.table import Table from abc", "compute energy per volume.\") for i_part in self.parts_by_index(time, n): # Limits memory usage", "= (0 < j) & (j < len(E)-1) j = j[cut] en =", "t, flavor=Flavor.NU_E_BAR): \"\"\"Return source mean energy at time t for a given flavor.", "n): \"\"\"Returns a list of size-n numpy arrays containing indices for the elements", "from __future__ import print_function, division from snewpy.neutrino import Flavor from .stellardist import FixedDistance,", "StellarDensity model, with options to add LMC and SMC. 
fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path])", "> 0.: flux = luminosity / mean_energy else: flux = 0 return flux", "= parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile, lmc, smc) else: raise ValueError('Unrecognized", "E, flavor).value * phot, Enu, axis=0) E_per_V *= H2O_in_ice / ( 4 *", "u) # Linearly interpolate in the CDF to produce a random energy energies[j", "E[0].to('MeV').value energies[j >= len(E)-1] = E[-1].to('MeV').value cut = (0 < j) & (j", "for i_part in self.parts_by_index(time, n): # Limits memory usage E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part],", "E: \\ np.exp((1 + a) * np.log(1 + a) - loggamma(1 + a)", "compute at once. A temporary numpy array of size n x time.size is", "m**3 of ice deposited by neutrinos of requested flavor \"\"\" H2O_in_ice = 3.053e28", "partition of size <n if x.size is not multiple of n if len(i_part)*n", "/ mean_energy else: flux = 0 return flux / u.s # Where the", "of neutrino fluxes from supernovae as modeled in the CCSN literature. For each", "at once. A temporary numpy array of size n x time.size is created", "list List of index partitions (partitions are numpy array). \"\"\" nParts = x.size//n", "of lepton cross section with lepton mean energy and lepton path length per", "u.m**3 def initialize(config): \"\"\"Initialize a Source model from configuration parameters. Parameters ---------- config", "col for col in sn_data_table.keys() ): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha", "the CDF to produce a random energy energies[j <= 0] = E[0].to('MeV').value energies[j", "core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- luminosity : float Source", "E, flavor) * self.get_flux(t, flavor) else: nu_spectrum = mixing(self) print('Beginning {0} simulation... 
{1}'.format(flavor.name,", "# parameterized by mean energy and pinch parameter alpha. True for # nearly", "in range(nParts) ] # Generate final partition of size <n if x.size is", "temporary numpy array of size n x time.size is created and can be", "E_per_V *= 2 print('Completed') return E_per_V * u.MeV / u.m**3 def initialize(config): \"\"\"Initialize", "the neutrino energy distribution at time t. Parameters ---------- t : float Time", "= self.get_mean_energy(t, flavor).value if isinstance(t, (list, tuple, np.ndarray)): flux = np.divide(luminosity, mean_energy, where=(mean_energy", "( with m<n ) if there are remaining elements of x. Returns -------", "format: \"ASCII\"') else: raise ValueError('Unknown format {}'.format(config.source.table.format)) # Set up the distance model.", "or more elements if len(i_part[-1]) < 2: i_part[-2] = np.append(i_part[-2], i_part[-1]) i_part =", "n : int Maximum number of time steps to compute at once. A", "# -*- coding: utf-8 -*- \"\"\"CCSN neutrino sources. This module encapsulates the basic", "Ea = self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t, (list, tuple, np.ndarray)): # It is non-physical", "if dmtype == 'FixedDistance': # FixedDistance model. r = parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty)", "mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False) pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False ) elif config.source.table.format.lower()", "of ice by photons from SN neutrino interactions. Parameters ---------- time : float", ": :class:`asteria.neutrino.Flavor` Neutrino flavor. E : `numpy.ndarray` Sorted grid of neutrino energies to", "a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut = (a < 0) & (Ea > 0)", "energy spectrum. \"\"\" cdf = self.energy_cdf(flavor, t, E) energies = np.zeros(n, dtype=float) #", "Ea, E: \\ gdtr(1., a + 1., (a + 1.) 
* (E /", "model files/interpolations still have this a[a<0] = 0 cut = (a >= 0)", "any( fl in col for col in sn_data_table.keys() ): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E", "0.: return np.zeros_like(E) elif a <= 0.: return self.energy_pdf(0, Ea, E.value).real else: return", "else: return self.energy_pdf(a, Ea, E.value).real def sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a", "E, flavor, photon_spectrum, mixing=None, n=1000): \"\"\"Compute the energy deposited in a cubic meter", "Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1, 1)) return E_pdf else: if Ea <= 0.: return", "def get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity at time t for a given", "+ 1.) * (E / Ea)) def parts_by_index(self, x, n): \"\"\"Returns a list", "to core bounce. E : `numpy.ndarray` Sorted grid of neutrino energies to compute", "mean energy at time t for a given flavor. Parameters ---------- t :", ": int Number of energy samples to produce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor.", "flavor. Parameters ---------- t : float Time relative to core bounce (units seconds).", "this a[a<0] = 0 cut = (a >= 0) & (Ea > 0)", "alpha, extrapolate=False ) elif config.source.table.format.lower() == 'ascii': # ASCII will be supported! Promise,", "sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor] = PchipInterpolator(time, E,", "(a + 1.) * (E / Ea)) def parts_by_index(self, x, n): \"\"\"Returns a", "x.size: i_part += [ np.arange( len(i_part)*n, x.size ) ] # Ensure that last", "E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut = (a < 0)", "= E[-1].to('MeV').value cut = (0 < j) & (j < len(E)-1) j =", "Set up the distance model. 
distance_model = None dmtype = config.source.progenitor.distance.model if dmtype", "sn_data_table['ALPHA_{:s}'.format(fl)] elif fl == 'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV') alpha =", "energy). \"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching", "m<n ) if there are remaining elements of x. Returns ------- i_part :", "coding: utf-8 -*- \"\"\"CCSN neutrino sources. This module encapsulates the basic parameters of", "of neutrino energies to compute the energy PDF. Returns ------- spectrum : `numpy.ndarray`", "\"\"\" # Dictionary of L, <E>, and alpha versus time, keyed by neutrino", "(cdf[j+1] - cdf[j]) * (u[cut] - cdf[j]) energies[cut] = en return energies def", "= 0 return flux / u.s # Where the mean energy is not", "def parts_by_index(self, x, n): \"\"\"Returns a list of size-n numpy arrays containing indices", "of the product of lepton cross section with lepton mean energy and lepton", "self.energy_cdf = lambda a, Ea, E: \\ gdtr(1., a + 1., (a +", ".config import parse_quantity from astropy import units as u from astropy.table import Table", "fl = flavor.name.upper() if any( fl in col for col in sn_data_table.keys() ):", "like a gamma function, # parameterized by mean energy and pinch parameter alpha.", "= 3.053e28 # 1 / u.m**3 t = time.to(u.s).value Enu = E.to(u.MeV).value if", "in units neutrinos # per second, elsewhere, returns zero. # flux = np.ediff1d(t,", "------- E_per_V Energy per m**3 of ice deposited by neutrinos of requested flavor", "supernovae as modeled in the CCSN literature. 
For each species of neutrino one", "= PchipInterpolator(time, E, extrapolate=False) pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False ) elif config.source.table.format.lower() ==", "luminosity self.mean_energy = mean_energy self.pinch = pinch # Energy PDF function is assumed", "the energy spectrum of the neutrinos at any given time. \"\"\" from __future__", "sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time,", ">= 0) & (Ea > 0) E_pdf = np.zeros( (Enu.size, t.size), dtype =", "as np from scipy.special import loggamma, gdtr from scipy.interpolate import PchipInterpolator class Source:", "u.m**3 t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0: Enu[0] =", "else: if Ea <= 0.: return np.zeros_like(E) elif a <= 0.: return self.energy_pdf(0,", "of index partitions (partitions are numpy array). \"\"\" nParts = x.size//n i_part =", "E_pdf = np.zeros( (Enu.size, t.size), dtype = float ) E_pdf[:, cut] = self.v_energy_pdf(", "array. Returns ------- time : float Source time profile (units of s). \"\"\"", "# Energy CDF, useful for random energy sampling. self.energy_cdf = lambda a, Ea,", "luminosity : float Source luminosity (units of power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg", "Sorted grid of neutrino energies to compute the energy PDF. Returns ------- spectrum", "self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t, (list, tuple, np.ndarray)): # It is non-physical to have", "CCSN models. 
self.energy_pdf = lambda a, Ea, E: \\ np.exp((1 + a) *", "raise RuntimeError(\"Time array size <2, unable to compute energy per volume.\") for i_part", "Where the mean energy is not zero, return rate in units neutrinos #", "table: for flavor in Flavor: fl = flavor.name.upper() if any( fl in col", "Use simple 1D linear interpolation t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0]", "FITS file, which contains a luminosity table and a pinching # parameter (alpha)", "energy spectrum of the neutrinos at any given time. \"\"\" from __future__ import", "t, get current average energy and pinch parameter. # Use simple 1D linear", "parameterized by mean energy and pinch parameter alpha. True for # nearly all", "sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif fl == 'NU_X_BAR': L =", "< 2: i_part[-2] = np.append(i_part[-2], i_part[-1]) i_part = i_part[0:-1] return i_part def get_time(self):", "lmc, smc) else: raise ValueError('Unrecognized distance_model: {}'.format(dmtype)) return Source(config.source.name, config.source.model, parse_quantity(config.source.progenitor.mass), distance_model.distance(), time,", "energy. \"\"\" # Given t, get current average energy and pinch parameter. #", "i_part += [ np.arange( len(i_part)*n, x.size ) ] # Ensure that last partition", "parameter alpha. True for # nearly all CCSN models. self.energy_pdf = lambda a,", "energy samples to produce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- energies :", "n x time.size is created and can be very memory inefficient. Returns -------", "def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching paramter alpha at time t for", "if Ea <= 0.: return np.zeros_like(E) elif a <= 0.: return self.energy_pdf(0, Ea,", "of the CDF of the neutrino energy distribution. 
Parameters ---------- t : float", "> 0) E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1, 1)) return", "even if it is 1. E_per_V = np.zeros( time.size ) if time.size <", "= flavor.name.upper() if any( fl in col for col in sn_data_table.keys() ): L", "True for # nearly all CCSN models. self.energy_pdf = lambda a, Ea, E:", "it is 1. E_per_V = np.zeros( time.size ) if time.size < 2: raise", "energy and pinch parameter alpha. True for # nearly all CCSN models. self.energy_pdf", "source luminosity at time t for a given flavor. Parameters ---------- t :", ": `numpy.ndarray` Table of energies sampled from the energy spectrum. \"\"\" cdf =", ": float Time relative to core bounce (units seconds). flavor : :class:`asteria.neutrino.Flavor` Neutrino", "time={}, luminosity={}, mean_energy={}, pinch={}): self.name = name self.model = spectral_model self.progenitor_mass = progenitor_mass", "to the CDF # of the neutrino energy distribution at time t u", "energy distribution at time t u = np.random.uniform(n) j = np.searchsorted(cdf, u) #", "Returns ------- luminosity : float Source luminosity (units of power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t))", "config.source.table.format.lower() == 'ascii': # ASCII will be supported! Promise, promise. raise ValueError('Unsupported format:", "# Open FITS file, which contains a luminosity table and a pinching #", "parameters used to create a Source. Returns ------- Source An initialized source model.", "return a number when numpy size is called on them, even if it", "flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF of the neutrino energy distribution at time t. Parameters", "time, flavor ) # s**-1 if mixing is None: def nu_spectrum(t, E, flavor):", "lambda a, Ea, E: \\ gdtr(1., a + 1., (a + 1.) 
*", "time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0: Enu[0] = 1e-10 * u.MeV", "{1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='') # The following two lines exploit the fact that", "will be supported! Promise, promise. raise ValueError('Unsupported format: \"ASCII\"') else: raise ValueError('Unknown format", "compute the energy PDF. n : int Number of energy samples to produce.", "return self.energy_pdf(0, Ea, E.value).real else: return self.energy_pdf(a, Ea, E.value).real def sample_energies(self, t, E,", "< j) & (j < len(E)-1) j = j[cut] en = E[j] +", "a, Ea, E: \\ np.exp((1 + a) * np.log(1 + a) - loggamma(1", "= parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r, dr) elif dmtype == 'StellarDensity':", "Parameters ---------- config : :class:`asteria.config.Configuration` Configuration parameters used to create a Source. Returns", "0 return flux / u.s # Where the mean energy is not zero,", "= np.random.uniform(n) j = np.searchsorted(cdf, u) # Linearly interpolate in the CDF to", "== 'fits': # Open FITS file, which contains a luminosity table and a", "# per second, elsewhere, returns zero. # flux = np.ediff1d(t, to_end=(t[-1] - t[-2]))", "= progenitor_mass self.progenitor_distance = progenitor_distance self.time = time self.luminosity = luminosity self.mean_energy =", "# s**-1 if mixing is None: def nu_spectrum(t, E, flavor): return self.energy_spectrum(t, E,", "np.zeros_like(E) elif a <= 0.: return self.energy_pdf(0, Ea, E.value).real else: return self.energy_pdf(a, Ea,", "elements of x, and one size-m array ( with m<n ) if there", "neutrino energies at some time t for a particular neutrino flavor. 
The energies", "import PchipInterpolator class Source: def __init__(self, name, spectral_model, progenitor_mass, progenitor_distance, time={}, luminosity={}, mean_energy={},", "distance_model = StellarDensity(fitsfile, lmc, smc) else: raise ValueError('Unrecognized distance_model: {}'.format(dmtype)) return Source(config.source.name, config.source.model,", "u.MeV a = self.get_pinch_parameter(t, flavor) Ea = self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t, (list, tuple,", "still have this a[a<0] = 0 cut = (a >= 0) & (Ea", "zero. # flux = np.ediff1d(t, to_end=(t[-1] - t[-2])) * rate # # return", "count of neutrinos). \"\"\" t = time.to(u.s).value luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy =", "# nearly all CCSN models. self.energy_pdf = lambda a, Ea, E: \\ np.exp((1", "H2O_in_ice / ( 4 * np.pi * dist**2) * np.ediff1d(t, to_end=(t[-1] - t[-2]))", "up the distance model. distance_model = None dmtype = config.source.progenitor.distance.model if dmtype ==", "interactions. Parameters ---------- time : float (units s) Time relative to core bounce.", "phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist = self.progenitor_distance.to(u.m).value # m**2 flux = self.get_flux(", "parameter E n : int Maximum number of time steps to compute at", "(E[j+1] - E[j]) / (cdf[j+1] - cdf[j]) * (u[cut] - cdf[j]) energies[cut] =", "+ (E[j+1] - E[j]) / (cdf[j+1] - cdf[j]) * (u[cut] - cdf[j]) energies[cut]", "ASCII will be supported! Promise, promise. raise ValueError('Unsupported format: \"ASCII\"') else: raise ValueError('Unknown", "= 1e-10 # u.MeV a = self.get_pinch_parameter(t, flavor) Ea = self.get_mean_energy(t, flavor).to(u.MeV).value if", "elif fl == 'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X']", "m**2 dist = self.progenitor_distance.to(u.m).value # m**2 flux = self.get_flux( time, flavor ) #", "# ASCII will be supported! Promise, promise. 
raise ValueError('Unsupported format: \"ASCII\"') else: raise", "flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- mean_energy : float Source mean energy", "size <2, unable to compute energy per volume.\") for i_part in self.parts_by_index(time, n):", "0.: flux = luminosity / mean_energy else: flux = 0 return flux /", "time t. Parameters ---------- t : float Time relative to core bounce. flavor", "1. E_per_V = np.zeros( time.size ) if time.size < 2: raise RuntimeError(\"Time array", "numpy array of size n x time.size is created and can be very", "a function of energy. \"\"\" # Given t, get current average energy and", "self.energy_cdf(flavor, t, E) energies = np.zeros(n, dtype=float) # Generate a random number between", "model from configuration parameters. Parameters ---------- config : :class:`asteria.config.Configuration` Configuration parameters used to", "FixedDistance(r, dr) elif dmtype == 'StellarDensity': # StellarDensity model, with options to add", "per m**3 of ice deposited by neutrinos of requested flavor \"\"\" H2O_in_ice =", "): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif fl ==", "flavor in Flavor: fl = flavor.name.upper() if any( fl in col for col", "energy energies[j <= 0] = E[0].to('MeV').value energies[j >= len(E)-1] = E[-1].to('MeV').value cut =", "always return a number when numpy size is called on them, even if", "isinstance(t, (list, tuple, np.ndarray)): # It is non-physical to have a<0 but some", "np.exp((1 + a) * np.log(1 + a) - loggamma(1 + a) + a", "np.searchsorted(cdf, u) # Linearly interpolate in the CDF to produce a random energy", "and pinch parameter alpha. True for # nearly all CCSN models. self.energy_pdf =", "s**-1 if mixing is None: def nu_spectrum(t, E, flavor): return self.energy_spectrum(t, E, flavor)", "and alpha versus time, keyed by neutrino flavor. 
luminosity, mean_energy, pinch = {},", "config.source.table.path]) sn_data_table = Table.read(fitsfile) time = sn_data_table['TIME'].to('s') # Loop over all flavors in", "energies flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. photon_spectrum : `numpy.ndarray` (Units vary, m**2) Grid", "Generate final partition of size <n if x.size is not multiple of n", "function, # parameterized by mean energy and pinch parameter alpha. True for #", "for col in sn_data_table.keys() ): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha =", "i_part = i_part[0:-1] return i_part def get_time(self): \"\"\"Return source time as numpy array.", "to add LMC and SMC. fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc", "self.luminosity = luminosity self.mean_energy = mean_energy self.pinch = pinch # Energy PDF function", "\"\"\"Initialize a Source model from configuration parameters. Parameters ---------- config : :class:`asteria.config.Configuration` Configuration", "int Number of energy samples to produce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns", "r = parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r, dr) elif dmtype ==", "quantities will # always return a number when numpy size is called on", "return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return source flux at time t for", "power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s) def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return", "- (1 + a) * (E / Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)'", ": float Source number flux (unit-less, count of neutrinos). 
\"\"\" t = time.to(u.s).value", "if not flavor.is_electron: E_per_V *= 2 print('Completed') return E_per_V * u.MeV / u.m**3", "by neutrino flavor. luminosity, mean_energy, pinch = {}, {}, {} if config.source.table.format.lower() ==", "in a cubic meter of ice by photons from SN neutrino interactions. Parameters", "`numpy.ndarray` Table of energies sampled from the energy spectrum. \"\"\" cdf = self.energy_cdf(flavor,", "in the table: for flavor in Flavor: fl = flavor.name.upper() if any( fl", "Neutrino flavor. Returns ------- pinch : float Source pinching parameter (unitless). \"\"\" return", "np.divide(luminosity, mean_energy, where=(mean_energy > 0), out=np.zeros(len(luminosity))) else: if mean_energy > 0.: flux =", "CDF of the neutrino energy distribution. Parameters ---------- t : float Time relative", "a given flavor. Parameters ---------- t : float Time relative to core bounce", "the energy deposited in a cubic meter of ice by photons from SN", "elif a <= 0.: return self.energy_pdf(0, Ea, E.value).real else: return self.energy_pdf(a, Ea, E.value).real", "containing indices for the elements of x, and one size-m array ( with", "alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif fl == 'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV')", "np.log(1 + a) - loggamma(1 + a) + a * np.log(E) - \\", "if there are remaining elements of x. Returns ------- i_part : list List", "neutrino flavor. luminosity, mean_energy, pinch = {}, {}, {} if config.source.table.format.lower() == 'fits':", "Source luminosity (units of power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s) def", "size-n numpy arrays containing indices for the elements of x, and one size-m", "interpolate in the CDF to produce a random energy energies[j <= 0] =", "average energy and pinch parameter. # Use simple 1D linear interpolation t =", "neutrino sources. 
This module encapsulates the basic parameters of neutrino fluxes from supernovae", "Returns ------- mean_energy : float Source mean energy (units of energy). \"\"\" return", "cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut = (a < 0) &", "n=1000): \"\"\"Compute the energy deposited in a cubic meter of ice by photons", "if isinstance(t, (list, tuple, np.ndarray)): # It is non-physical to have a<0 but", "self.time def get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity at time t for a", "KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False) pinch[flavor] =", "SMC. fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model =", "of L, <E>, and alpha versus time, keyed by neutrino flavor. luminosity, mean_energy,", "if mean_energy > 0.: flux = luminosity / mean_energy else: flux = 0", "from abc import ABC, abstractmethod import numpy as np from scipy.special import loggamma,", "t, flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity at time t for a given flavor. Parameters", "self.get_flux( time, flavor ) # s**-1 if mixing is None: def nu_spectrum(t, E,", "print('Beginning {0} simulation... 
{1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='') # The following two lines exploit", "isinstance(t, (list, tuple, np.ndarray)): flux = np.divide(luminosity, mean_energy, where=(mean_energy > 0), out=np.zeros(len(luminosity))) else:", "\"\"\"Generate a random sample of neutrino energies at some time t for a", "snewpy.neutrino import Flavor from .stellardist import FixedDistance, StellarDensity from .config import parse_quantity from", "1e-10 * u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist = self.progenitor_distance.to(u.m).value # m**2", "'/'.join([config.abs_base_path, config.source.table.path]) sn_data_table = Table.read(fitsfile) time = sn_data_table['TIME'].to('s') # Loop over all flavors", "keyed by neutrino flavor. luminosity, mean_energy, pinch = {}, {}, {} if config.source.table.format.lower()", ":class:`asteria.neutrino.Flavor` Neutrino flavor. photon_spectrum : `numpy.ndarray` (Units vary, m**2) Grid of the product", "Loop over all flavors in the table: for flavor in Flavor: fl =", "m**2) Grid of the product of lepton cross section with lepton mean energy", "= spectral_model self.progenitor_mass = progenitor_mass self.progenitor_distance = progenitor_distance self.time = time self.luminosity =", "if len(i_part[-1]) < 2: i_part[-2] = np.append(i_part[-2], i_part[-1]) i_part = i_part[0:-1] return i_part", "flux at time t for a given flavor. Parameters ---------- t : float", "np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return source flux at time t for a", "E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif fl == 'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s')", "flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. 
photon_spectrum : `numpy.ndarray` (Units vary, m**2) Grid of", "\"\"\" from __future__ import print_function, division from snewpy.neutrino import Flavor from .stellardist import", "species of neutrino one requires an estimate of the luminosity vs. time as", "a = self.get_pinch_parameter(t, flavor) Ea = self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t, (list, tuple, np.ndarray)):", "len(E)-1) j = j[cut] en = E[j] + (E[j+1] - E[j]) / (cdf[j+1]", "for i in range(nParts) ] # Generate final partition of size <n if", "distance_model = None dmtype = config.source.progenitor.distance.model if dmtype == 'FixedDistance': # FixedDistance model.", "Time relative to core bounce. E : `numpy.ndarray` Sorted grid of neutrino energies", "(Enu.size, t.size), dtype = float ) E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\", "parameters of neutrino fluxes from supernovae as modeled in the CCSN literature. For", ":class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- pinch : float Source pinching parameter (unitless). \"\"\"", "Enu, axis=0) E_per_V *= H2O_in_ice / ( 4 * np.pi * dist**2) *", "dist = self.progenitor_distance.to(u.m).value # m**2 flux = self.get_flux( time, flavor ) # s**-1", "np.log(Ea) - (1 + a) * (E / Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'],", "to_end=(t[-1] - t[-2])) * rate # # return flux def energy_spectrum(self, time, E,", "the energy spectrum. \"\"\" cdf = self.energy_cdf(flavor, t, E) energies = np.zeros(n, dtype=float)", "`numpy.ndarray` Table of PDF values computed as a function of energy. \"\"\" #", "sorted according to parameter E n : int Maximum number of time steps", "them, even if it is 1. 
E_per_V = np.zeros( time.size ) if time.size", "lambda a, Ea, E: \\ np.exp((1 + a) * np.log(1 + a) -", ") for i in range(nParts) ] # Generate final partition of size <n", "return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s) def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source mean", "model. r = parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r, dr) elif dmtype", "of energies sampled from the energy spectrum. \"\"\" cdf = self.energy_cdf(flavor, t, E)", "else: nu_spectrum = mixing(self) print('Beginning {0} simulation... {1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='') # The", "\"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching paramter", "energies to compute the energy PDF. n : int Number of energy samples", "return energies def photonic_energy_per_vol(self, time, E, flavor, photon_spectrum, mixing=None, n=1000): \"\"\"Compute the energy", "np.log(E) - \\ (1 + a) * np.log(Ea) - (1 + a) *", "energies[cut] = en return energies def photonic_energy_per_vol(self, time, E, flavor, photon_spectrum, mixing=None, n=1000):", "# flux = np.ediff1d(t, to_end=(t[-1] - t[-2])) * rate # # return flux", "& (Ea > 0) E_pdf = np.zeros( (Enu.size, t.size), dtype = float )", "2 or more elements if len(i_part[-1]) < 2: i_part[-2] = np.append(i_part[-2], i_part[-1]) i_part", "np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) # Energy CDF, useful for random energy sampling. 
self.energy_cdf", "energy deposited in a cubic meter of ice by photons from SN neutrino", "abc import ABC, abstractmethod import numpy as np from scipy.special import loggamma, gdtr", "= 0 cut = (a >= 0) & (Ea > 0) E_pdf =", "= en return energies def photonic_energy_per_vol(self, time, E, flavor, photon_spectrum, mixing=None, n=1000): \"\"\"Compute", "energy at time t for a given flavor. Parameters ---------- t : float", "= parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r, dr) elif dmtype == 'StellarDensity': # StellarDensity model,", "flavor. Parameters ---------- t : float Time relative to core bounce. flavor :", "< len(E)-1) j = j[cut] en = E[j] + (E[j+1] - E[j]) /", "# Linearly interpolate in the CDF to produce a random energy energies[j <=", "CDF to produce a random energy energies[j <= 0] = E[0].to('MeV').value energies[j >=", "well as the energy spectrum of the neutrinos at any given time. \"\"\"", "but some model files/interpolations still have this a[a<0] = 0 cut = (a", "nu_spectrum(t, E, flavor): return self.energy_spectrum(t, E, flavor) * self.get_flux(t, flavor) else: nu_spectrum =", "- \\ (1 + a) * np.log(Ea) - (1 + a) * (E", "the neutrinos at any given time. \"\"\" from __future__ import print_function, division from", "at any given time. \"\"\" from __future__ import print_function, division from snewpy.neutrino import", "1e-10 # u.MeV a = self.get_pinch_parameter(t, flavor) Ea = self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t,", "models. self.energy_pdf = lambda a, Ea, E: \\ np.exp((1 + a) * np.log(1", "= progenitor_distance self.time = time self.luminosity = luminosity self.mean_energy = mean_energy self.pinch =", "from astropy import units as u from astropy.table import Table from abc import", "units neutrinos # per second, elsewhere, returns zero. 
# flux = np.ediff1d(t, to_end=(t[-1]", "(0 < j) & (j < len(E)-1) j = j[cut] en = E[j]", "the product of lepton cross section with lepton mean energy and lepton path", "float Source pinching parameter (unitless). \"\"\" return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return", "gamma function, # parameterized by mean energy and pinch parameter alpha. True for", "neutrino one requires an estimate of the luminosity vs. time as well as", "self.pinch = pinch # Energy PDF function is assumed to be like a", "grid of neutrino energies flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. photon_spectrum : `numpy.ndarray` (Units", "'StellarDensity': # StellarDensity model, with options to add LMC and SMC. fitsfile =", "is 1. E_per_V = np.zeros( time.size ) if time.size < 2: raise RuntimeError(\"Time", "always has 2 or more elements if len(i_part[-1]) < 2: i_part[-2] = np.append(i_part[-2],", "else: raise ValueError('Unknown format {}'.format(config.source.table.format)) # Set up the distance model. distance_model =", "of the neutrinos at any given time. \"\"\" from __future__ import print_function, division", "= self.get_mean_energy(t, flavor).to(u.MeV).value if isinstance(t, (list, tuple, np.ndarray)): # It is non-physical to", "ice deposited by neutrinos of requested flavor \"\"\" H2O_in_ice = 3.053e28 # 1", "the basic parameters of neutrino fluxes from supernovae as modeled in the CCSN", "[ np.arange( len(i_part)*n, x.size ) ] # Ensure that last partition always has", "(units s) Time relative to core bounce. E : `numpy.ndarray` Sorted grid of", "Maximum number of time steps to compute at once. A temporary numpy array", "of x. 
Returns ------- i_part : list List of index partitions (partitions are", "def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source mean energy at time t for a", "= sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False)", "+ a) * np.log(Ea) - (1 + a) * (E / Ea)) self.v_energy_pdf", "E_per_V Energy per m**3 of ice deposited by neutrinos of requested flavor \"\"\"", "Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- mean_energy", "Returns ------- time : float Source time profile (units of s). \"\"\" return", "Table of PDF values computed as a function of energy. \"\"\" # Given", "lepton path length per MeV, sorted according to parameter E n : int", "+= np.trapz( nu_spectrum(time[i_part], E, flavor).value * phot, Enu, axis=0) E_per_V *= H2O_in_ice /", "dtype = float ) E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut", "A temporary numpy array of size n x time.size is created and can", "to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- pinch : float", "1 and compare to the CDF # of the neutrino energy distribution at", "t, E) energies = np.zeros(n, dtype=float) # Generate a random number between 0", "time = sn_data_table['TIME'].to('s') # Loop over all flavors in the table: for flavor", "else: if mean_energy > 0.: flux = luminosity / mean_energy else: flux =", "time t u = np.random.uniform(n) j = np.searchsorted(cdf, u) # Linearly interpolate in", "bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. E : `numpy.ndarray` Sorted grid of neutrino", "None dmtype = config.source.progenitor.distance.model if dmtype == 'FixedDistance': # FixedDistance model. 
r =", "self.progenitor_mass = progenitor_mass self.progenitor_distance = progenitor_distance self.time = time self.luminosity = luminosity self.mean_energy", "function is assumed to be like a gamma function, # parameterized by mean", "# Energy PDF function is assumed to be like a gamma function, #", "Generate a random number between 0 and 1 and compare to the CDF", ") if time.size < 2: raise RuntimeError(\"Time array size <2, unable to compute", "t : float Time relative to core bounce. E : `numpy.ndarray` Sorted grid", "be very memory inefficient. Returns ------- E_per_V Energy per m**3 of ice deposited", "np.zeros(n, dtype=float) # Generate a random number between 0 and 1 and compare", "energy (units of energy). \"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR):", "E_per_V = np.zeros( time.size ) if time.size < 2: raise RuntimeError(\"Time array size", "following two lines exploit the fact that astropy quantities will # always return", "E_pdf else: if Ea <= 0.: return np.zeros_like(E) elif a <= 0.: return", "mean_energy={}, pinch={}): self.name = name self.model = spectral_model self.progenitor_mass = progenitor_mass self.progenitor_distance =", "t for a given flavor. Parameters ---------- t : float Time relative to", "configuration parameters. Parameters ---------- config : :class:`asteria.config.Configuration` Configuration parameters used to create a", "config.source.progenitor.distance.model if dmtype == 'FixedDistance': # FixedDistance model. 
r = parse_quantity(config.source.progenitor.distance.distance) dr =", "j = j[cut] en = E[j] + (E[j+1] - E[j]) / (cdf[j+1] -", "__init__(self, name, spectral_model, progenitor_mass, progenitor_distance, time={}, luminosity={}, mean_energy={}, pinch={}): self.name = name self.model", "Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) # Energy CDF, useful for random", "per second, elsewhere, returns zero. # flux = np.ediff1d(t, to_end=(t[-1] - t[-2])) *", "numpy as np from scipy.special import loggamma, gdtr from scipy.interpolate import PchipInterpolator class", "u.s) def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source mean energy at time t for", "t. Parameters ---------- t : float Time relative to core bounce. flavor :", "# The following two lines exploit the fact that astropy quantities will #", "parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile, lmc, smc) else: raise ValueError('Unrecognized distance_model:", "print_function, division from snewpy.neutrino import Flavor from .stellardist import FixedDistance, StellarDensity from .config", "The energies are generated via inverse transform sampling of the CDF of the", "time profile (units of s). \"\"\" return self.time def get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return", "array of size n x time.size is created and can be very memory", "= mixing(self) print('Beginning {0} simulation... {1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='') # The following two", "exploit the fact that astropy quantities will # always return a number when", "energies to compute the energy PDF. Returns ------- spectrum : `numpy.ndarray` Table of", "a Source. Returns ------- Source An initialized source model. 
\"\"\" # Dictionary of", "2 print('Completed') return E_per_V * u.MeV / u.m**3 def initialize(config): \"\"\"Initialize a Source", "def get_time(self): \"\"\"Return source time as numpy array. Returns ------- time : float", "a + 1., (a + 1.) * (E / Ea)) def parts_by_index(self, x,", "computed as a function of energy. \"\"\" # Given t, get current average", "-*- \"\"\"CCSN neutrino sources. This module encapsulates the basic parameters of neutrino fluxes", "cross section with lepton mean energy and lepton path length per MeV, sorted", "flavor=Flavor.NU_E_BAR): \"\"\"Return source flux at time t for a given flavor. Parameters ----------", "= self.get_flux( time, flavor ) # s**-1 if mixing is None: def nu_spectrum(t,", "= self.progenitor_distance.to(u.m).value # m**2 flux = self.get_flux( time, flavor ) # s**-1 if", "t = time.to(u.s).value luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t, flavor).value if isinstance(t,", "Table.read(fitsfile) time = sn_data_table['TIME'].to('s') # Loop over all flavors in the table: for", "of neutrino one requires an estimate of the luminosity vs. time as well", "transform sampling of the CDF of the neutrino energy distribution. Parameters ---------- t", "'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl))", ":class:`asteria.neutrino.Flavor` Neutrino flavor. E : `numpy.ndarray` Sorted grid of neutrino energies to compute", "= StellarDensity(fitsfile, lmc, smc) else: raise ValueError('Unrecognized distance_model: {}'.format(dmtype)) return Source(config.source.name, config.source.model, parse_quantity(config.source.progenitor.mass),", "elements of x. Returns ------- i_part : list List of index partitions (partitions", ": :class:`asteria.neutrino.Flavor` Neutrino flavor. 
Returns ------- mean_energy : float Source mean energy (units", "neutrino energies to compute the energy PDF. Returns ------- spectrum : `numpy.ndarray` Table", "flavor. E : `numpy.ndarray` Sorted grid of neutrino energies to compute the energy", "(j < len(E)-1) j = j[cut] en = E[j] + (E[j+1] - E[j])", "in sn_data_table.keys() ): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif", "at time t u = np.random.uniform(n) j = np.searchsorted(cdf, u) # Linearly interpolate", "= (a < 0) & (Ea > 0) E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1),", "E=Enu.reshape(-1,1)) cut = (a < 0) & (Ea > 0) E_pdf[:, cut] =", "return flux / u.s # Where the mean energy is not zero, return", "<2, unable to compute energy per volume.\") for i_part in self.parts_by_index(time, n): #", "distribution. Parameters ---------- t : float Time relative to core bounce. E :", "t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0: Enu[0] = 1e-10", "== 'StellarDensity': # StellarDensity model, with options to add LMC and SMC. fitsfile", "x.size//n i_part = [ np.arange( i*n, (i+1)*n ) for i in range(nParts) ]", "have this a[a<0] = 0 cut = (a >= 0) & (Ea >", "= None dmtype = config.source.progenitor.distance.model if dmtype == 'FixedDistance': # FixedDistance model. r", "energy and lepton path length per MeV, sorted according to parameter E n", "all CCSN models. self.energy_pdf = lambda a, Ea, E: \\ np.exp((1 + a)", "and can be very memory inefficient. Returns ------- E_per_V Energy per m**3 of", "List of index partitions (partitions are numpy array). \"\"\" nParts = x.size//n i_part", "get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source mean energy at time t for a given", "seconds). flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- flux : float Source number", "the luminosity vs. 
time as well as the energy spectrum of the neutrinos", "\"\"\" cdf = self.energy_cdf(flavor, t, E) energies = np.zeros(n, dtype=float) # Generate a", "+ a) * (E / Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) #", "time.size ) if time.size < 2: raise RuntimeError(\"Time array size <2, unable to", "/ u.s) def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source mean energy at time t", "== 'FixedDistance': # FixedDistance model. r = parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model =", "\"\"\" return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return source flux at time t", "# Use simple 1D linear interpolation t = time.to(u.s).value Enu = E.to(u.MeV).value if", "= self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut = (a < 0) & (Ea", "flavor.is_electron: E_per_V *= 2 print('Completed') return E_per_V * u.MeV / u.m**3 def initialize(config):", "FixedDistance, StellarDensity from .config import parse_quantity from astropy import units as u from", "= parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile, lmc, smc) else: raise ValueError('Unrecognized distance_model: {}'.format(dmtype)) return", "np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source pinching paramter alpha at", "Neutrino flavor. Returns ------- luminosity : float Source luminosity (units of power). \"\"\"", "`numpy.ndarray` Sorted grid of neutrino energies to compute the energy PDF. 
n :", "(i+1)*n ) for i in range(nParts) ] # Generate final partition of size", "if x.size is not multiple of n if len(i_part)*n != x.size: i_part +=", "(1 + a) * (E / Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' )", "not zero, return rate in units neutrinos # per second, elsewhere, returns zero.", "0) E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1, 1)) return E_pdf", "PDF function is assumed to be like a gamma function, # parameterized by", "table. fitsfile = '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table = Table.read(fitsfile) time = sn_data_table['TIME'].to('s') # Loop", "on them, even if it is 1. E_per_V = np.zeros( time.size ) if", "+ a) * np.log(1 + a) - loggamma(1 + a) + a *", "dmtype == 'StellarDensity': # StellarDensity model, with options to add LMC and SMC.", "mean energy (units of energy). \"\"\" return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV def get_pinch_parameter(self, t,", "Promise, promise. raise ValueError('Unsupported format: \"ASCII\"') else: raise ValueError('Unknown format {}'.format(config.source.table.format)) # Set", "= lambda a, Ea, E: \\ gdtr(1., a + 1., (a + 1.)", "flavor \"\"\" H2O_in_ice = 3.053e28 # 1 / u.m**3 t = time.to(u.s).value Enu", "is called on them, even if it is 1. E_per_V = np.zeros( time.size", "rate in units neutrinos # per second, elsewhere, returns zero. # flux =", "1D linear interpolation t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0.:", "bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. 
Returns ------- luminosity : float Source luminosity", "np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s) def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source mean energy", "division from snewpy.neutrino import Flavor from .stellardist import FixedDistance, StellarDensity from .config import", "cdf = self.energy_cdf(flavor, t, E) energies = np.zeros(n, dtype=float) # Generate a random", "elif config.source.table.format.lower() == 'ascii': # ASCII will be supported! Promise, promise. raise ValueError('Unsupported", "PchipInterpolator class Source: def __init__(self, name, spectral_model, progenitor_mass, progenitor_distance, time={}, luminosity={}, mean_energy={}, pinch={}):", "E.to(u.MeV).value if Enu[0] == 0.: Enu[0] = 1e-10 # u.MeV a = self.get_pinch_parameter(t,", "to parameter E n : int Maximum number of time steps to compute", "the elements of x, and one size-m array ( with m<n ) if", "= luminosity / mean_energy else: flux = 0 return flux / u.s #", "get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return source flux at time t for a given flavor.", "def sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a random sample of neutrino energies", "number of time steps to compute at once. 
A temporary numpy array of", "is not zero, return rate in units neutrinos # per second, elsewhere, returns", "= sn_data_table['L_NU_X'].to('erg/s') E = sn_data_table['E_NU_X'].to('MeV') alpha = sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] =", "raise ValueError('Unsupported format: \"ASCII\"') else: raise ValueError('Unknown format {}'.format(config.source.table.format)) # Set up the", "vary, m**2) Grid of the product of lepton cross section with lepton mean", "1)) return E_pdf else: if Ea <= 0.: return np.zeros_like(E) elif a <=", "file, which contains a luminosity table and a pinching # parameter (alpha) and", "------- energies : `numpy.ndarray` Table of energies sampled from the energy spectrum. \"\"\"", "int Maximum number of time steps to compute at once. A temporary numpy", "spectral_model self.progenitor_mass = progenitor_mass self.progenitor_distance = progenitor_distance self.time = time self.luminosity = luminosity", "of power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s) def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR):", "paramter alpha at time t for a given flavor. Parameters ---------- t :", "useful for random energy sampling. self.energy_cdf = lambda a, Ea, E: \\ gdtr(1.,", "requires an estimate of the luminosity vs. time as well as the energy", "array ( with m<n ) if there are remaining elements of x. Returns", "CDF # of the neutrino energy distribution at time t u = np.random.uniform(n)", "parameter (alpha) and mean energy table. fitsfile = '/'.join([config.abs_base_path, config.source.table.path]) sn_data_table = Table.read(fitsfile)", "called on them, even if it is 1. E_per_V = np.zeros( time.size )", "# Generate a random number between 0 and 1 and compare to the", "Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. 
Returns ------- pinch", "* self.get_flux(t, flavor) else: nu_spectrum = mixing(self) print('Beginning {0} simulation... {1}'.format(flavor.name, ' '*(10-len(flavor.name))),", "PDF values computed as a function of energy. \"\"\" # Given t, get", "1 / u.m**3 t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0:", "This module encapsulates the basic parameters of neutrino fluxes from supernovae as modeled", "x, and one size-m array ( with m<n ) if there are remaining", "parameters. Parameters ---------- config : :class:`asteria.config.Configuration` Configuration parameters used to create a Source.", "j[cut] en = E[j] + (E[j+1] - E[j]) / (cdf[j+1] - cdf[j]) *", "# # return flux def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF of", "-1), \\ E=Enu.reshape(-1, 1)) return E_pdf else: if Ea <= 0.: return np.zeros_like(E)", "LMC and SMC. fitsfile = '/'.join([config.abs_base_path, config.source.progenitor.distance.path]) lmc = parse_quantity(config.source.progenitor.distance.add_LMC) smc = parse_quantity(config.source.progenitor.distance.add_SMC)", "of s). \"\"\" return self.time def get_luminosity(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source luminosity at", "name, spectral_model, progenitor_mass, progenitor_distance, time={}, luminosity={}, mean_energy={}, pinch={}): self.name = name self.model =", "np.pi * dist**2) * np.ediff1d(t, to_end=(t[-1] - t[-2])) if not flavor.is_electron: E_per_V *=", "\"\"\" return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s) def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR): \"\"\"Return source", "{}'.format(config.source.table.format)) # Set up the distance model. distance_model = None dmtype = config.source.progenitor.distance.model", "The following two lines exploit the fact that astropy quantities will # always", "excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) # Energy CDF, useful for random energy sampling. self.energy_cdf =", "flavor. 
luminosity, mean_energy, pinch = {}, {}, {} if config.source.table.format.lower() == 'fits': #", "neutrino energies flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. photon_spectrum : `numpy.ndarray` (Units vary, m**2)", "Parameters ---------- t : float Time relative to core bounce (units seconds). flavor", "mean_energy self.pinch = pinch # Energy PDF function is assumed to be like", "of time steps to compute at once. A temporary numpy array of size", "lepton cross section with lepton mean energy and lepton path length per MeV,", "-*- coding: utf-8 -*- \"\"\"CCSN neutrino sources. This module encapsulates the basic parameters", "energy PDF. Returns ------- spectrum : `numpy.ndarray` Table of PDF values computed as", "E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a random sample of neutrino energies at some time", "utf-8 -*- \"\"\"CCSN neutrino sources. This module encapsulates the basic parameters of neutrino", "\"\"\" H2O_in_ice = 3.053e28 # 1 / u.m**3 t = time.to(u.s).value Enu =", "\"\"\" # Given t, get current average energy and pinch parameter. # Use", "E[j]) / (cdf[j+1] - cdf[j]) * (u[cut] - cdf[j]) energies[cut] = en return", "E) energies = np.zeros(n, dtype=float) # Generate a random number between 0 and", "tuple, np.ndarray)): # It is non-physical to have a<0 but some model files/interpolations", "of the neutrino energy distribution at time t u = np.random.uniform(n) j =", "one requires an estimate of the luminosity vs. time as well as the", "from configuration parameters. Parameters ---------- config : :class:`asteria.config.Configuration` Configuration parameters used to create", "t : float Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor.", "flavor=Flavor.NU_E_BAR): \"\"\"Return source mean energy at time t for a given flavor. 
Parameters", "flavor).value if isinstance(t, (list, tuple, np.ndarray)): flux = np.divide(luminosity, mean_energy, where=(mean_energy > 0),", "energies at some time t for a particular neutrino flavor. The energies are", "(units of power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s) def get_mean_energy(self, t,", "smc = parse_quantity(config.source.progenitor.distance.add_SMC) distance_model = StellarDensity(fitsfile, lmc, smc) else: raise ValueError('Unrecognized distance_model: {}'.format(dmtype))", "Returns ------- i_part : list List of index partitions (partitions are numpy array).", "flavor. Returns ------- pinch : float Source pinching parameter (unitless). \"\"\" return np.nan_to_num(self.pinch[flavor](t))", "= sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)] elif fl == 'NU_X_BAR': L = sn_data_table['L_NU_X'].to('erg/s') E", "size-m array ( with m<n ) if there are remaining elements of x.", "energy PDF. n : int Number of energy samples to produce. flavor :", "flux / u.s # Where the mean energy is not zero, return rate", "import numpy as np from scipy.special import loggamma, gdtr from scipy.interpolate import PchipInterpolator", "flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- flux : float Source number flux", "def get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return source flux at time t for a given", "E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF of the neutrino energy distribution at time t.", "flux = self.get_flux( time, flavor ) # s**-1 if mixing is None: def", "n if len(i_part)*n != x.size: i_part += [ np.arange( len(i_part)*n, x.size ) ]", "if it is 1. E_per_V = np.zeros( time.size ) if time.size < 2:", "product of lepton cross section with lepton mean energy and lepton path length", "a pinching # parameter (alpha) and mean energy table. fitsfile = '/'.join([config.abs_base_path, config.source.table.path])", "returns zero. 
# flux = np.ediff1d(t, to_end=(t[-1] - t[-2])) * rate # #", "format {}'.format(config.source.table.format)) # Set up the distance model. distance_model = None dmtype =", ": float Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns", "= E[j] + (E[j+1] - E[j]) / (cdf[j+1] - cdf[j]) * (u[cut] -", "meter of ice by photons from SN neutrino interactions. Parameters ---------- time :", "fact that astropy quantities will # always return a number when numpy size", "= mean_energy self.pinch = pinch # Energy PDF function is assumed to be", "Limits memory usage E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E, flavor).value * phot, Enu, axis=0)", "E n : int Maximum number of time steps to compute at once.", "tuple, np.ndarray)): flux = np.divide(luminosity, mean_energy, where=(mean_energy > 0), out=np.zeros(len(luminosity))) else: if mean_energy", "< 0) & (Ea > 0) E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1),", "return i_part def get_time(self): \"\"\"Return source time as numpy array. Returns ------- time", "float Time relative to core bounce. E : `numpy.ndarray` Sorted grid of neutrino", "and 1 and compare to the CDF # of the neutrino energy distribution", "Ea)) def parts_by_index(self, x, n): \"\"\"Returns a list of size-n numpy arrays containing", "x. 
Returns ------- i_part : list List of index partitions (partitions are numpy", "from astropy.table import Table from abc import ABC, abstractmethod import numpy as np", "E_per_V * u.MeV / u.m**3 def initialize(config): \"\"\"Initialize a Source model from configuration", "parse_quantity(config.source.progenitor.distance.distance) dr = parse_quantity(config.source.progenitor.distance.uncertainty) distance_model = FixedDistance(r, dr) elif dmtype == 'StellarDensity': #", "number when numpy size is called on them, even if it is 1.", "cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \\ E=Enu.reshape(-1, 1)) return E_pdf else: if", "photon_spectrum, mixing=None, n=1000): \"\"\"Compute the energy deposited in a cubic meter of ice", "= np.zeros( (Enu.size, t.size), dtype = float ) E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1),", "units as u from astropy.table import Table from abc import ABC, abstractmethod import", "t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0.: Enu[0] = 1e-10", "luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t, flavor).value if isinstance(t, (list, tuple, np.ndarray)):", "# Ensure that last partition always has 2 or more elements if len(i_part[-1])", "\"\"\"Return source luminosity at time t for a given flavor. Parameters ---------- t", "cdf[j]) energies[cut] = en return energies def photonic_energy_per_vol(self, time, E, flavor, photon_spectrum, mixing=None,", "(Units vary, m**2) Grid of the product of lepton cross section with lepton", "Returns ------- spectrum : `numpy.ndarray` Table of PDF values computed as a function", "E=Enu.reshape(-1, 1)) return E_pdf else: if Ea <= 0.: return np.zeros_like(E) elif a", "as a function of energy. \"\"\" # Given t, get current average energy", "from supernovae as modeled in the CCSN literature. For each species of neutrino", "Parameters ---------- t : float Time relative to core bounce. 
E : `numpy.ndarray`", "np.random.uniform(n) j = np.searchsorted(cdf, u) # Linearly interpolate in the CDF to produce", "0.: return self.energy_pdf(0, Ea, E.value).real else: return self.energy_pdf(a, Ea, E.value).real def sample_energies(self, t,", "with lepton mean energy and lepton path length per MeV, sorted according to", "there are remaining elements of x. Returns ------- i_part : list List of", "import FixedDistance, StellarDensity from .config import parse_quantity from astropy import units as u", "compute the energy PDF. Returns ------- spectrum : `numpy.ndarray` Table of PDF values", "i_part in self.parts_by_index(time, n): # Limits memory usage E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E,", "to produce a random energy energies[j <= 0] = E[0].to('MeV').value energies[j >= len(E)-1]", "per MeV, sorted according to parameter E n : int Maximum number of", ": :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- luminosity : float Source luminosity (units of", ") # Energy CDF, useful for random energy sampling. self.energy_cdf = lambda a,", "return np.zeros_like(E) elif a <= 0.: return self.energy_pdf(0, Ea, E.value).real else: return self.energy_pdf(a,", "parameter. # Use simple 1D linear interpolation t = time.to(u.s).value Enu = E.to(u.MeV).value", "- cdf[j]) energies[cut] = en return energies def photonic_energy_per_vol(self, time, E, flavor, photon_spectrum,", "Returns ------- pinch : float Source pinching parameter (unitless). \"\"\" return np.nan_to_num(self.pinch[flavor](t)) def", "self.energy_pdf(a, Ea, E.value).real def sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a random sample", "if any( fl in col for col in sn_data_table.keys() ): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s')", "* np.log(E) - \\ (1 + a) * np.log(Ea) - (1 + a)", "with m<n ) if there are remaining elements of x. 
Returns ------- i_part", "\\ (1 + a) * np.log(Ea) - (1 + a) * (E /", "] # Generate final partition of size <n if x.size is not multiple", "self.get_flux(t, flavor) else: nu_spectrum = mixing(self) print('Beginning {0} simulation... {1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='')", "self.energy_spectrum(t, E, flavor) * self.get_flux(t, flavor) else: nu_spectrum = mixing(self) print('Beginning {0} simulation...", "via inverse transform sampling of the CDF of the neutrino energy distribution. Parameters", "lepton mean energy and lepton path length per MeV, sorted according to parameter", ": float (units s) Time relative to core bounce. E : `numpy.ndarray` Sorted", "an estimate of the luminosity vs. time as well as the energy spectrum", "0 cut = (a >= 0) & (Ea > 0) E_pdf = np.zeros(", "number between 0 and 1 and compare to the CDF # of the", "# Loop over all flavors in the table: for flavor in Flavor: fl", "---------- t : float Time relative to core bounce (units seconds). flavor :", "] # Ensure that last partition always has 2 or more elements if", "\"\"\"Compute the energy deposited in a cubic meter of ice by photons from", "interpolation t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0.: Enu[0] =", "sn_data_table['TIME'].to('s') # Loop over all flavors in the table: for flavor in Flavor:", "For each species of neutrino one requires an estimate of the luminosity vs.", "t[-2])) * rate # # return flux def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute", "of neutrinos). \"\"\" t = time.to(u.s).value luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t,", "Parameters ---------- time : float (units s) Time relative to core bounce. 
E", "= np.append(i_part[-2], i_part[-1]) i_part = i_part[0:-1] return i_part def get_time(self): \"\"\"Return source time", "and one size-m array ( with m<n ) if there are remaining elements", "flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- pinch : float Source pinching parameter", "a<0 but some model files/interpolations still have this a[a<0] = 0 cut =", "{0} simulation... {1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='') # The following two lines exploit the", "time, flavor=Flavor.NU_E_BAR): \"\"\"Return source flux at time t for a given flavor. Parameters", "u = np.random.uniform(n) j = np.searchsorted(cdf, u) # Linearly interpolate in the CDF", "import units as u from astropy.table import Table from abc import ABC, abstractmethod", "+ a * np.log(E) - \\ (1 + a) * np.log(Ea) - (1", "Dictionary of L, <E>, and alpha versus time, keyed by neutrino flavor. luminosity,", "(E / Ea)) self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' ) # Energy CDF, useful", "arrays containing indices for the elements of x, and one size-m array (", "en return energies def photonic_energy_per_vol(self, time, E, flavor, photon_spectrum, mixing=None, n=1000): \"\"\"Compute the", "def __init__(self, name, spectral_model, progenitor_mass, progenitor_distance, time={}, luminosity={}, mean_energy={}, pinch={}): self.name = name", "= sn_data_table['ALPHA_NU_X'] else: raise KeyError(\"\"\"'{0}'\"\"\".format(fl)) luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor] = PchipInterpolator(time,", "linear interpolation t = time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0.: Enu[0]", "------- time : float Source time profile (units of s). \"\"\" return self.time", "flavor. 
photon_spectrum : `numpy.ndarray` (Units vary, m**2) Grid of the product of lepton", "luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False) mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False) pinch[flavor] = PchipInterpolator(time,", "len(i_part[-1]) < 2: i_part[-2] = np.append(i_part[-2], i_part[-1]) i_part = i_part[0:-1] return i_part def", "Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut = (a < 0) & (Ea > 0) E_pdf[:,", "end='') # The following two lines exploit the fact that astropy quantities will", "mixing is None: def nu_spectrum(t, E, flavor): return self.energy_spectrum(t, E, flavor) * self.get_flux(t,", "given flavor. Parameters ---------- t : float Time relative to core bounce (units", "estimate of the luminosity vs. time as well as the energy spectrum of", "are remaining elements of x. Returns ------- i_part : list List of index", "def photonic_energy_per_vol(self, time, E, flavor, photon_spectrum, mixing=None, n=1000): \"\"\"Compute the energy deposited in", "bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- pinch : float Source pinching", "cut = (a >= 0) & (Ea > 0) E_pdf = np.zeros( (Enu.size,", "col in sn_data_table.keys() ): L = sn_data_table['L_{:s}'.format(fl)].to('erg/s') E = sn_data_table['E_{:s}'.format(fl)].to('MeV') alpha = sn_data_table['ALPHA_{:s}'.format(fl)]", "Enu = E.to(u.MeV).value if Enu[0] == 0: Enu[0] = 1e-10 * u.MeV phot", "PDF. n : int Number of energy samples to produce. flavor : :class:`asteria.neutrino.Flavor`", "\"\"\"Return source mean energy at time t for a given flavor. Parameters ----------", ": float Source time profile (units of s). \"\"\" return self.time def get_luminosity(self,", "Number of energy samples to produce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. 
Returns -------", "deposited by neutrinos of requested flavor \"\"\" H2O_in_ice = 3.053e28 # 1 /", "all flavors in the table: for flavor in Flavor: fl = flavor.name.upper() if", "the CDF of the neutrino energy distribution. Parameters ---------- t : float Time", "Energy per m**3 of ice deposited by neutrinos of requested flavor \"\"\" H2O_in_ice", "distribution at time t. Parameters ---------- t : float Time relative to core", "{}, {}, {} if config.source.table.format.lower() == 'fits': # Open FITS file, which contains", ") ] # Ensure that last partition always has 2 or more elements", "of the luminosity vs. time as well as the energy spectrum of the", "time t for a given flavor. Parameters ---------- t : float Time relative", "fluxes from supernovae as modeled in the CCSN literature. For each species of", "j = np.searchsorted(cdf, u) # Linearly interpolate in the CDF to produce a", "float ) E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \\ E=Enu.reshape(-1,1)) cut = (a", "# Generate final partition of size <n if x.size is not multiple of", "PDF of the neutrino energy distribution at time t. Parameters ---------- t :", "are generated via inverse transform sampling of the CDF of the neutrino energy", "in Flavor: fl = flavor.name.upper() if any( fl in col for col in", "return self.energy_pdf(a, Ea, E.value).real def sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR): \"\"\"Generate a random", "is None: def nu_spectrum(t, E, flavor): return self.energy_spectrum(t, E, flavor) * self.get_flux(t, flavor)", "PDF. Returns ------- spectrum : `numpy.ndarray` Table of PDF values computed as a", "L, extrapolate=False) mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False) pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False )", "alpha versus time, keyed by neutrino flavor. 
luminosity, mean_energy, pinch = {}, {},", "= x.size//n i_part = [ np.arange( i*n, (i+1)*n ) for i in range(nParts)", ": float Source luminosity (units of power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg /", "sources. This module encapsulates the basic parameters of neutrino fluxes from supernovae as", "pinching parameter (unitless). \"\"\" return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time, flavor=Flavor.NU_E_BAR): \"\"\"Return source flux", "E_per_V *= H2O_in_ice / ( 4 * np.pi * dist**2) * np.ediff1d(t, to_end=(t[-1]", "some time t for a particular neutrino flavor. The energies are generated via", "when numpy size is called on them, even if it is 1. E_per_V", "float Time relative to core bounce. flavor : :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns -------", "* u.MeV phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2 dist = self.progenitor_distance.to(u.m).value # m**2 flux", "(unit-less, count of neutrinos). \"\"\" t = time.to(u.s).value luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy", "time : float Source time profile (units of s). \"\"\" return self.time def", "\"\"\" t = time.to(u.s).value luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t, flavor).value if", "= np.ediff1d(t, to_end=(t[-1] - t[-2])) * rate # # return flux def energy_spectrum(self,", "= PchipInterpolator(time, alpha, extrapolate=False ) elif config.source.table.format.lower() == 'ascii': # ASCII will be", "a) + a * np.log(E) - \\ (1 + a) * np.log(Ea) -", "\"\"\"Returns a list of size-n numpy arrays containing indices for the elements of", "photons from SN neutrino interactions. Parameters ---------- time : float (units s) Time", "get_time(self): \"\"\"Return source time as numpy array. Returns ------- time : float Source", "pinch : float Source pinching parameter (unitless). 
\"\"\" return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self, time,", "------- pinch : float Source pinching parameter (unitless). \"\"\" return np.nan_to_num(self.pinch[flavor](t)) def get_flux(self,", "time.to(u.s).value luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value mean_energy = self.get_mean_energy(t, flavor).value if isinstance(t, (list, tuple,", ": :class:`asteria.neutrino.Flavor` Neutrino flavor. Returns ------- flux : float Source number flux (unit-less,", "[ np.arange( i*n, (i+1)*n ) for i in range(nParts) ] # Generate final", "of neutrino energies to compute the energy PDF. n : int Number of", "float Source luminosity (units of power). \"\"\" return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s)", "Source. Returns ------- Source An initialized source model. \"\"\" # Dictionary of L,", "source mean energy at time t for a given flavor. Parameters ---------- t", "Ea, E: \\ np.exp((1 + a) * np.log(1 + a) - loggamma(1 +", "energies sampled from the energy spectrum. \"\"\" cdf = self.energy_cdf(flavor, t, E) energies", "i_part[0:-1] return i_part def get_time(self): \"\"\"Return source time as numpy array. Returns -------", "# return flux def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR): \"\"\"Compute the PDF of the", "s) Time relative to core bounce. E : `numpy.ndarray` Sorted grid of neutrino", "elif dmtype == 'StellarDensity': # StellarDensity model, with options to add LMC and", "flavor, photon_spectrum, mixing=None, n=1000): \"\"\"Compute the energy deposited in a cubic meter of", "= time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0: Enu[0] = 1e-10 *", "extrapolate=False ) elif config.source.table.format.lower() == 'ascii': # ASCII will be supported! 
Promise, promise.", "np.ndarray)): flux = np.divide(luminosity, mean_energy, where=(mean_energy > 0), out=np.zeros(len(luminosity))) else: if mean_energy >", "np.arange( i*n, (i+1)*n ) for i in range(nParts) ] # Generate final partition", "float Source time profile (units of s). \"\"\" return self.time def get_luminosity(self, t,", "- cdf[j]) * (u[cut] - cdf[j]) energies[cut] = en return energies def photonic_energy_per_vol(self,", "= time.to(u.s).value Enu = E.to(u.MeV).value if Enu[0] == 0.: Enu[0] = 1e-10 #", "for random energy sampling. self.energy_cdf = lambda a, Ea, E: \\ gdtr(1., a", "vs. time as well as the energy spectrum of the neutrinos at any", "def nu_spectrum(t, E, flavor): return self.energy_spectrum(t, E, flavor) * self.get_flux(t, flavor) else: nu_spectrum", "L, <E>, and alpha versus time, keyed by neutrino flavor. luminosity, mean_energy, pinch", "= (a >= 0) & (Ea > 0) E_pdf = np.zeros( (Enu.size, t.size),", "that astropy quantities will # always return a number when numpy size is", "abstractmethod import numpy as np from scipy.special import loggamma, gdtr from scipy.interpolate import" ]
[ "... class NotFoundIDError(BaseError): ... class NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable): self.enviromentVariable : str =", "InvalidIDError(BaseError): ... class NotFoundIDError(BaseError): ... class NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable): self.enviromentVariable : str", "f'The enviroment variable {enviromentVariable} was not found.' super().__init__(self.message) def __str__(self): return f'{self.enviromentVariable} ->", "BaseError(Exception): ... class UnknownError(BaseError): ... class InvalidIDError(BaseError): ... class NotFoundIDError(BaseError): ... class NotFoundEnviromentVariableError(BaseError):", "= enviromentVariable self.message = f'The enviroment variable {enviromentVariable} was not found.' super().__init__(self.message) def", "class BaseError(Exception): ... class UnknownError(BaseError): ... class InvalidIDError(BaseError): ... class NotFoundIDError(BaseError): ... class", "class NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable): self.enviromentVariable : str = enviromentVariable self.message = f'The", "self.message = f'The enviroment variable {enviromentVariable} was not found.' super().__init__(self.message) def __str__(self): return", "enviromentVariable self.message = f'The enviroment variable {enviromentVariable} was not found.' super().__init__(self.message) def __str__(self):", "... class NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable): self.enviromentVariable : str = enviromentVariable self.message =", "NotFoundIDError(BaseError): ... class NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable): self.enviromentVariable : str = enviromentVariable self.message", "... class UnknownError(BaseError): ... class InvalidIDError(BaseError): ... class NotFoundIDError(BaseError): ... 
class NotFoundEnviromentVariableError(BaseError): def", "def __init__(self, enviromentVariable): self.enviromentVariable : str = enviromentVariable self.message = f'The enviroment variable", "__init__(self, enviromentVariable): self.enviromentVariable : str = enviromentVariable self.message = f'The enviroment variable {enviromentVariable}", "class InvalidIDError(BaseError): ... class NotFoundIDError(BaseError): ... class NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable): self.enviromentVariable :", "= f'The enviroment variable {enviromentVariable} was not found.' super().__init__(self.message) def __str__(self): return f'{self.enviromentVariable}", "enviromentVariable): self.enviromentVariable : str = enviromentVariable self.message = f'The enviroment variable {enviromentVariable} was", "enviroment variable {enviromentVariable} was not found.' super().__init__(self.message) def __str__(self): return f'{self.enviromentVariable} -> {self.message}'", "class NotFoundIDError(BaseError): ... class NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable): self.enviromentVariable : str = enviromentVariable", "<gh_stars>1-10 class BaseError(Exception): ... class UnknownError(BaseError): ... class InvalidIDError(BaseError): ... class NotFoundIDError(BaseError): ...", "NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable): self.enviromentVariable : str = enviromentVariable self.message = f'The enviroment", "UnknownError(BaseError): ... class InvalidIDError(BaseError): ... class NotFoundIDError(BaseError): ... class NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable):", "str = enviromentVariable self.message = f'The enviroment variable {enviromentVariable} was not found.' super().__init__(self.message)", ": str = enviromentVariable self.message = f'The enviroment variable {enviromentVariable} was not found.'", "... class InvalidIDError(BaseError): ... 
class NotFoundIDError(BaseError): ... class NotFoundEnviromentVariableError(BaseError): def __init__(self, enviromentVariable): self.enviromentVariable", "class UnknownError(BaseError): ... class InvalidIDError(BaseError): ... class NotFoundIDError(BaseError): ... class NotFoundEnviromentVariableError(BaseError): def __init__(self,", "self.enviromentVariable : str = enviromentVariable self.message = f'The enviroment variable {enviromentVariable} was not" ]
[ "x = 1 特例 r = x // 2 + 1 while l", "mid = l + (r - l + 1) // 2 square =", "-> int: l = 0 # x = 1 特例 r = x", "mySqrt(self, x: int) -> int: l = 0 # x = 1 特例", "特例 r = x // 2 + 1 while l < r: mid", "def mySqrt(self, x: int) -> int: l = 0 # x = 1", "r = x // 2 + 1 while l < r: mid =", "l = 0 # x = 1 特例 r = x // 2", "= x // 2 + 1 while l < r: mid = l", "= 0 # x = 1 特例 r = x // 2 +", "= 1 特例 r = x // 2 + 1 while l <", "+ (r - l + 1) // 2 square = mid * mid", "* mid if square > x: r = mid - 1 else: l", "x // 2 + 1 while l < r: mid = l +", "1) // 2 square = mid * mid if square > x: r", "class Solution: def mySqrt(self, x: int) -> int: l = 0 # x", "mid * mid if square > x: r = mid - 1 else:", "x: int) -> int: l = 0 # x = 1 特例 r", "< r: mid = l + (r - l + 1) // 2", "- l + 1) // 2 square = mid * mid if square", "r: mid = l + (r - l + 1) // 2 square", "square > x: r = mid - 1 else: l = mid return", "= mid * mid if square > x: r = mid - 1", "int: l = 0 # x = 1 特例 r = x //", "0 # x = 1 特例 r = x // 2 + 1", "1 特例 r = x // 2 + 1 while l < r:", "> x: r = mid - 1 else: l = mid return l", "// 2 + 1 while l < r: mid = l + (r", "= l + (r - l + 1) // 2 square = mid", "square = mid * mid if square > x: r = mid -", "l + (r - l + 1) // 2 square = mid *", "+ 1 while l < r: mid = l + (r - l", "2 + 1 while l < r: mid = l + (r -", "l < r: mid = l + (r - l + 1) //", "Solution: def mySqrt(self, x: int) -> int: l = 0 # x =", "if square > x: r = mid - 1 else: l = mid", "<filename>problems/sqrtx/solution-2.py class Solution: def mySqrt(self, x: int) -> int: l = 0 #", "1 while l < r: mid = l + (r - l +", "mid if square > x: r = mid - 1 else: l =", "// 2 square = mid * mid if square > x: r =", "while l < r: mid = l + (r - l + 1)", "int) -> int: l = 0 # x = 1 特例 r =", "(r - l + 1) // 2 square = mid * mid if", "2 square = mid * mid if square > x: r = mid", "+ 1) // 2 square = mid * mid if square > x:", "l + 1) // 2 square = mid * mid if square >", "# x = 1 特例 r = x // 2 + 1 while" ]
[ "# Generated by Django 1.10.5 on 2017-03-01 15:31 from django.db import migrations class", "1.10.5 on 2017-03-01 15:31 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "migrations class Migration(migrations.Migration): dependencies = [ ('security', '0010_prisoner_profile_uniqueness'), ] operations = [ migrations.DeleteModel(name='SecurityDataUpdate'),", "2017-03-01 15:31 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('security', '0010_prisoner_profile_uniqueness'),", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('security', '0010_prisoner_profile_uniqueness'), ] operations", "<reponame>ministryofjustice/mtp-api # Generated by Django 1.10.5 on 2017-03-01 15:31 from django.db import migrations", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('security', '0010_prisoner_profile_uniqueness'), ] operations =", "import migrations class Migration(migrations.Migration): dependencies = [ ('security', '0010_prisoner_profile_uniqueness'), ] operations = [", "by Django 1.10.5 on 2017-03-01 15:31 from django.db import migrations class Migration(migrations.Migration): dependencies", "Generated by Django 1.10.5 on 2017-03-01 15:31 from django.db import migrations class Migration(migrations.Migration):", "class Migration(migrations.Migration): dependencies = [ ('security', '0010_prisoner_profile_uniqueness'), ] operations = [ migrations.DeleteModel(name='SecurityDataUpdate'), ]", "Django 1.10.5 on 2017-03-01 15:31 from django.db import migrations class Migration(migrations.Migration): dependencies =", "on 2017-03-01 15:31 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('security',", "15:31 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('security', '0010_prisoner_profile_uniqueness'), ]" ]
[ "with the main data frame logger.info(\"Done !\") logger.debug('Tracks summary :') logger.debug(str(tracks_data.describe())) # Storing", "* 60 * 1852 # Get the equivalent of the latitude in meters", "of targets per tracks - timeInt and Time: mean time in ns since", "# accordingly cap_rel = abs(math.degrees( math.atan2(deltas[1][j - 1], - deltas[0][j - 1]))) #", "360) # Get the equivalent of the longitude in meters tracks['lat_m'] = tracks.x_gps", "0: result = pd.read_pickle(path_pickle) # read the pickle file else: logger.error(\"File empty !\")", "the target data targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by track length", "= [] for i in range(nb_target): index_targets += [i for j in range(nb_freq)]", "file else: logger.error(\"File empty !\") # Si le fichier Pickle est vide logger.info(\"done", "len(track_i), dist_range, dist_tot, tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x, sd_y, sd_z,", "sd_tot: sum of standard deviation - targets: matrix of all targets - freq:", "indexSounder = i indexTransducer = j logger.info(\"creating tables...\") # Extract the pickle data", "'vit_x', 'vit_y', 'vit_z', 'vit_range', 'sd_x', 'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca']) tracks_data", "to nearest neighbors in meters \"\"\" tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] #", "logger.error(\"File empty !\") # Si le fichier Pickle est vide logger.info(\"done !\") for", "latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1])) # distance between the 2 longitudes if", "runs, same for all tracks of each run - orient: orientation ('H' or", "1852 return dist def calc_distance_long(lat, lon1, lon2): \"\"\"Returns a distance between 2 longitudes", "pd.read_pickle(path_pickle) # read the pickle file else: logger.error(\"File empty !\") # Si le", "distance in m to transducer - TSalong, TSarthwart: mean angle in the 
transducer", "distance in m to the nearest neighbour - State, Abrv, tailleMoyenne: variables from", "else: logger.error(\"File empty !\") # Si le fichier Pickle est vide logger.info(\"done !\")", "TS and frequency data { \"target\": index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]),", "j logger.info(\"creating tables...\") # Extract the pickle data in several panda tables. nb_target", "different data frames as csv: # - targets, with individual targets of each", "TS_parameters['MinEchoNumber']] # Select by track length targets_data = targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x:", "1]) # delta in z axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) # delta", "# number of targets for the given sounder and transducer if nb_target >", "b20 from TScomp and taille moyenne # get the Nv value for each", "same for all tracks of each run - orient: orientation ('H' or 'V')", "merge of all the data tracks = targets.groupby(by=\"track\").target.agg('count') # get number of target", "provided # tracks movement analysis tracks_id = list(targets_data.groupby('track').groups) scores = [] for i", "for each run) if row.Name == name_transect: for header in hac_info.columns[1:]: tracks_data[header] =", "logger.info(\"targets ready !\") ##### Tracks grouping and analysis logger.info('Gathering tracks data...') tracks_data =", "# merge of all the data tracks = targets.groupby(by=\"track\").target.agg('count') # get number of", "logger.info(\"Done !\") logger.debug('Tracks summary :') logger.debug(str(tracks_data.describe())) # Storing 2 different data frames as", "ready !\") ##### Tracks grouping and analysis logger.info('Gathering tracks data...') tracks_data = targets_data.groupby('track').mean()", "!\") return name_transect = 
os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle) > 0: result = pd.read_pickle(path_pickle)", "tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(999) # relative and absolute heading is", "return name_transect = os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle) > 0: result = pd.read_pickle(path_pickle) #", "TS_freq = freq[freq.TSfreq == freq_TS] # get the TScomp for the given reference", "cap_rel = abs(math.degrees( math.atan2(deltas[1][j - 1], - deltas[0][j - 1]))) # heading relative", "len(result[0][indexSounder][indexTransducer]) # number of targets for the given sounder and transducer if nb_target", "track vit_x = dist_x / delta_t # speed vit_y = dist_y / delta_t", "pymovies tracking and returns several key parameters for each track. input: - path_pickle:", "freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS)) # closest frequency from the", "dist = dlat * 60 * 1852 return dist def calc_distance_long(lat, lon1, lon2):", "{ \"target\": index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq * nb_target)", "angle (absolute and relative) in degrees (according to orientation) - vit_x, vit_y, vit_z,", "conjunction(*conditions): \"\"\"Multiple conditions filter for panda\"\"\" return functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1, lat2): \"\"\"Returns", "import functools import os import math logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\"", "tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get position of each tracks tracks['long_m'] = tracks.y_gps", "in range(nb_target): index_targets += [i for j in range(nb_freq)] targets = pd.DataFrame( #", "np.mean(deltas[7]) # mean relative heading 
of the track cap_abs = np.mean(deltas[8]) # mean", "np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm distances, indices = nbrs.kneighbors(array)", "Nv = pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv'] = -999 # No", "\"/\" + name_transect + \"_tracks.csv\" filename_2 = path_output + \"/\" + name_transect +", "position of each targets (relative and absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x',", "x: abs(x - freq_TS)) # closest frequency from the reference # frequency freq_TS", "meters array = np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the array for nearest", "TS of all frequencies or for the closest frequency from reference frequency -", "targets - freq: mean TScomp for each frequency \"\"\" if path_pickle[-7:] != \".pickle\":", "i in tracks_id: # for each track track_i = targets_data.loc[ targets_data['track'] == i,", "complementary info on the different runs, same for all tracks of each run", "beam - x, y, z, x_gps, y_gps, z_gps: relative and absolute position -", "index=range(len(scores)), # storing values as a panda data frame columns=['track', 'dist_x', 'dist_y', 'dist_z',", "dist_z tilt_angle = np.mean(deltas[6]) # mean tilt angle of the track cap_rel =", "following different axis - sd_x, sd_y, sd_z, sd_range, sd_ta: standard deviation of previous", "logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\" input: tracking data matrix ouput: column", "for index, row in hac_info.iterrows(): # add the hac_info columns (same for each", "datetime tracks_data['k_dist'] = point_processing(tracks_data) # Distance to closest neighbor for index, row in", "index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps']) TS_means = 
freq.groupby(by=\"target\").mean() # get the TScomp_mean: mean", "dist_x / len(track_i), dist_y / len(track_i), dist_z / len(track_i), dist_range, dist_tot, tilt_angle, cap_rel,", "track_i.z.iloc[j - 1]) # delta in z axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1])", "Sort by time deltas = [[], [], [], [], [], [], [], [],", "= (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2),", "def point_processing(tracks_data): \"\"\" input: tracking data matrix ouput: column of distances to nearest", "sd_x + sd_y + sd_z scores.append( [i, dist_x / len(track_i), dist_y / len(track_i),", "targets = pd.merge(targets, tracks_len, how='inner', on='track') # add the track length to the", "* np.log10(tracks_data['tailleMoyenne'])) # get the b20 from TScomp and taille moyenne # get", "= track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds() # time length of", "output of movies TS analysis - path_output: path to store output csv -", "< 0: cap_abs = 360 + cap_abs # correct to have 0-360° headings", "mean distance in m to transducer - TSalong, TSarthwart: mean angle in the", "on the different runs, same for all tracks of each run - orient:", "sd_cr, sd_ca] ) dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing values as a panda", "np.sum(deltas[4]) # dist is the length of the track on several dimensions dist_y", "absolute position - TScomp_mean, TScomp: mean TS of all frequencies or for the", "'vit_range', 'sd_x', 'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca']) tracks_data = pd.merge(tracks_data, dist_scores,", "taille moyenne # get the Nv value for each track path_Nv = path_output", "meters tracks['lat_m'] = tracks.x_gps * 60 * 1852 # Get the equivalent of", "length to the target data targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by", "transducer for j in range(len(result[10][i])): if 
result[10][i][j] == transducer: indexSounder = i indexTransducer", "1]))) # heading relative to the boat else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1],", "for nearest neighbors algorithm array = np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest", "np.log10(tracks_data['tailleMoyenne'])) # get the b20 from TScomp and taille moyenne # get the", "lat2 - lat1 dist = dlat * 60 * 1852 return dist def", "vit_z, vit_range: speed following different axis - sd_x, sd_y, sd_z, sd_range, sd_ta: standard", "Pickle est vide logger.info(\"done !\") for i in range(len(result[10])): # get index for", "individual targets of each points # - tracks, with the run track data", "following different axis - tilt_angle, cap_rel, cap_abs: tilt or heading angle (absolute and", "b20: b20 value - Nv: Nv value - dist_x, dist_y, dist_z, dist_range, dist_tot:", "track data filename_1 = path_output + \"/\" + name_transect + \"_tracks.csv\" filename_2 =", "data frames as csv: # - targets, with individual targets of each points", ">= TS_parameters['MinEchoNumber']] # Select by track length targets_data = targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda", "for a given latitude\"\"\" dlon = lon2 - lon1 dist = dlon *", "heading relative to the boat else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j -", "each points # - tracks, with the run track data filename_1 = path_output", "- tracks: matrix of tracks with: - track, target: relative and absolute index", "delta in y axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) # delta in z", "+ dist_z tilt_angle = np.mean(deltas[6]) # mean tilt angle of the track cap_rel", "absolute heading is irrelevant on vertical echo sounder deltas[8].append(999) delta_t = track_i.timeTarget.iloc[len(track_i) -", "get the Nv value for each track path_Nv = path_output + '/' +", "data matrix ouput: column of distances to 
nearest neighbors in meters \"\"\" tracks", "track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds() # time length of the", "position - TScomp_mean, TScomp: mean TS of all frequencies or for the closest", "= np.std(deltas[8]) sd_tot = sd_x + sd_y + sd_z scores.append( [i, dist_x /", "= logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\" input: tracking data matrix ouput: column of", "TSarthwart: mean angle in the transducer beam - x, y, z, x_gps, y_gps,", "absolute (geographical) heading if cap_abs < 0: cap_abs = 360 + cap_abs #", "1])) # distance between the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1])) #", "in several panda tables. nb_target = len(result[0][indexSounder][indexTransducer]) # number of targets for the", "tracks, with the run track data filename_1 = path_output + \"/\" + name_transect", "frame logger.info(\"Done !\") logger.debug('Tracks summary :') logger.debug(str(tracks_data.describe())) # Storing 2 different data frames", "'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range', 'sd_x', 'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca'])", "return dist def unit_vector(vector): \"\"\" Returns the unit vector of the vector. 
\"\"\"", "range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) # delta in x axis deltas[1].append(track_i.y.iloc[j]", "the array for nearest neighbors algorithm array = np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array)", "TS detection and tracks selection - hac_info: complementary info on the different runs,", "#vertical echo sounder tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j", "# time length of the track (s) dist_x = np.sum(deltas[4]) # dist is", "\"_Nv.csv\" if os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv'] =", "sounder tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1]", "dlat * 60 * 1852 return dist def calc_distance_long(lat, lon1, lon2): \"\"\"Returns a", "add the track length to the target data targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']]", "if track_i.x.iloc[ j] > 0: # check if x is coherent (beam is", "for TS extraction - TS_parameters: parameter for the TS detection and tracks selection", "string formats - k_dist: distance in m to the nearest neighbour - State,", "the equivalent of the longitude in meters tracks['lat_m'] = tracks.x_gps * 60 *", "header in hac_info.columns[1:]: tracks_data[header] = row[header] tracks_data['b20'] = tracks_data['TScomp'] - ( 20 *", "Distance to closest neighbor for index, row in hac_info.iterrows(): # add the hac_info", "/ np.linalg.norm(vector) def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info, orient): \"\"\" Process the", "\"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) ) freq = pd.DataFrame( #", "summary :') 
logger.debug(str(tracks_data.describe())) # Storing 2 different data frames as csv: # -", "dlat = lat2 - lat1 dist = dlat * 60 * 1852 return", "in m to the nearest neighbour - State, Abrv, tailleMoyenne: variables from the", "dist_x + dist_y + dist_z tilt_angle = np.mean(deltas[6]) # mean tilt angle of", "in the transducer beam - x, y, z, x_gps, y_gps, z_gps: relative and", "frequence for TS extraction - TS_parameters: parameter for the TS detection and tracks", "= pd.DataFrame(scores, index=range(len(scores)), # storing values as a panda data frame columns=['track', 'dist_x',", "different runs, same for all tracks of each run - orient: orientation ('H'", "of previous displacement and angle - sd_tot: sum of standard deviation - targets:", "as mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime tracks_data['k_dist'] = point_processing(tracks_data) # Distance", "how='inner', on='track') # add the track length to the target data targets_selected =", "track. 
input: - path_pickle: path to a pickle file, output of movies TS", "for i in range(len(result[10])): # get index for the sounder and transducer according", "equivalent of the latitude in meters array = np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps]) #", "import Sv import logging import pandas as pd import numpy as np import", "file !\") return name_transect = os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle) > 0: result =", "= np.std(deltas[2]) sd_range = np.std(deltas[3]) sd_ta = np.std(deltas[6]) sd_cr = np.std(deltas[7]) sd_ca =", "1])) - 90) # tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(999) # relative", "track_i = targets_data.loc[ targets_data['track'] == i, ['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']]", "logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\" input: tracking data matrix ouput: column of distances to", "reference frequence for TS extraction - TS_parameters: parameter for the TS detection and", "np.std(deltas[8]) sd_tot = sd_x + sd_y + sd_z scores.append( [i, dist_x / len(track_i),", "sklearn.neighbors import NearestNeighbors import Sv import logging import pandas as pd import numpy", "[], [], [], [], []] for j in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j", "variables from the hac info file - b20: b20 value - Nv: Nv", "positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps']) TS_means = freq.groupby(by=\"target\").mean() # get", "- track_i.z.iloc[j - 1]) # delta in z axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j -", "np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the array for nearest neighbors algorithm array", "different axis - sd_x, sd_y, sd_z, sd_range, sd_ta: standard deviation of previous displacement", "mean relative heading of the track 
cap_abs = np.mean(deltas[8]) # mean absolute heading", "echo sounder if track_i.x.iloc[ j] > 0: # check if x is coherent", "TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) # merge of all the data tracks = targets.groupby(by=\"track\").target.agg('count') #", "sd_ca = np.std(deltas[8]) sd_tot = sd_x + sd_y + sd_z scores.append( [i, dist_x", "relative to the boat else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1])))", "+ cap_abs # correct to have 0-360° headings tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j -", "TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS)) #", "of targets for the given sounder and transducer if nb_target > 0: #", "the reference # frequency freq_TS TS_freq = freq[freq.TSfreq == freq_TS] # get the", "for i in range(nb_target): index_targets += [i for j in range(nb_freq)] targets =", "No Nv data provided # tracks movement analysis tracks_id = list(targets_data.groupby('track').groups) scores =", "1] - track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds() # time length of the track (s)", "correct to have 0-360° headings tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2", "dist_y / len(track_i), dist_z / len(track_i), dist_range, dist_tot, tilt_angle, cap_rel, cap_abs, vit_x, vit_y,", "import os import math logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\" input: tracking", "est vide logger.info(\"done !\") for i in range(len(result[10])): # get index for the", "== i, ['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']] track_i = track_i.sort_values('timeTarget') #", "= 360 + cap_abs # correct to have 0-360° headings tilt_angle = (math.degrees(", "of the track deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical echo sounder tilt_angle = (math.degrees(", "target per tracks tracks_len = 
pd.DataFrame( {'track': tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index)) ) targets", "name_transect + \"_tracks.csv\" filename_2 = path_output + \"/\" + name_transect + \"_targets.csv\" tracks_data.to_csv(filename_1,", "displacement and angle - sd_tot: sum of standard deviation - targets: matrix of", "deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1])) # distance between the 2 longitudes if orient", "orient: orientation ('H' or 'V') outputs: multiple csv - tracks: matrix of tracks", "if os.path.getsize(path_pickle) > 0: result = pd.read_pickle(path_pickle) # read the pickle file else:", "# - targets, with individual targets of each points # - tracks, with", "name_transect + \"_targets.csv\" tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files saved !\") freq_data = freq.groupby('TSfreq').mean()", "dist_range, dist_tot, tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x, sd_y, sd_z, sd_range,", "- 1]) # delta in z axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) #", "pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps',", "dist_range, dist_tot: mean displacement in m following different axis - tilt_angle, cap_rel, cap_abs:", "i in range(nb_target): index_targets += [i for j in range(nb_freq)] targets = pd.DataFrame(", "= math.degrees( math.atan2(deltas[5][j - 1], deltas[4][j - 1])) # absolute (geographical) heading if", "Abrv, tailleMoyenne: variables from the hac info file - b20: b20 value -", "\"\"\" Returns the unit vector of the vector. 
\"\"\" return vector / np.linalg.norm(vector)", "= len(result[0][indexSounder][indexTransducer]) # number of targets for the given sounder and transducer if", "how='inner', on='track') # merge with the main data frame logger.info(\"Done !\") logger.debug('Tracks summary", "sd_ta = np.std(deltas[6]) sd_cr = np.std(deltas[7]) sd_ca = np.std(deltas[8]) sd_tot = sd_x +", "if nb_target > 0: # check if any targets nb_freq = int(len(result[9][indexSounder][indexTransducer]) /", "csv: # - targets, with individual targets of each points # - tracks,", "\"\"\"Returns a distance between 2 latitudes\"\"\" dlat = lat2 - lat1 dist =", "TSrange: mean distance in m to transducer - TSalong, TSarthwart: mean angle in", "dist_z / len(track_i), dist_range, dist_tot, tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x,", "merge with the main data frame logger.info(\"Done !\") logger.debug('Tracks summary :') logger.debug(str(tracks_data.describe())) #", "the pickle file from pymovies tracking and returns several key parameters for each", "index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps'])", "dist_y = np.sum(deltas[5]) dist_z = np.sum(deltas[2]) dist_range = np.sum(deltas[3]) dist_tot = dist_x +", "# get the position of each targets (relative and absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer],", "between 2 latitudes\"\"\" dlat = lat2 - lat1 dist = dlat * 60", "the track (s) dist_x = np.sum(deltas[4]) # dist is the length of the", "60 * 1852 # Get the equivalent of the latitude in meters array", "column of distances to nearest neighbors in meters \"\"\" tracks = tracks_data.loc[:, ['x_gps',", "in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) # delta in x axis", "int (ns, 1970) logger.info(\"targets ready 
!\") ##### Tracks grouping and analysis logger.info('Gathering tracks", "length targets_data = targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) # convert time to", "algorithm array = np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm distances,", "= np.mean(deltas[8]) # mean absolute heading of the track vit_x = dist_x /", "TS_parameters: parameter for the TS detection and tracks selection - hac_info: complementary info", "- 90) # tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical", "deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical echo sounder tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] **", "a pickle file !\") return name_transect = os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle) > 0:", "of distances to nearest neighbors in meters \"\"\" tracks = tracks_data.loc[:, ['x_gps', 'y_gps',", "# merge with the main data frame logger.info(\"Done !\") logger.debug('Tracks summary :') logger.debug(str(tracks_data.describe()))", "dist_y + dist_z tilt_angle = np.mean(deltas[6]) # mean tilt angle of the track", "sounder deltas[8].append(999) delta_t = track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds() #", "90) # tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical echo", "grouping and analysis logger.info('Gathering tracks data...') tracks_data = targets_data.groupby('track').mean() # group targets by", "# Select by track length targets_data = targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value)", "columns (same for each run) if row.Name == name_transect: for header in hac_info.columns[1:]:", "0: 
cap_abs = 360 + cap_abs # correct to have 0-360° headings tilt_angle", "array = np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the array for nearest neighbors", "dimensions dist_y = np.sum(deltas[5]) dist_z = np.sum(deltas[2]) dist_range = np.sum(deltas[3]) dist_tot = dist_x", "tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime tracks_data['k_dist'] = point_processing(tracks_data) # Distance to closest", "index=False) logger.info(\"files saved !\") freq_data = freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index filename_3 = path_output", "of the vector. \"\"\" return vector / np.linalg.norm(vector) def pickle_processing(path_pickle, path_output, transducer, freq_TS,", "'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca']) tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge", "Extract the pickle data in several panda tables. nb_target = len(result[0][indexSounder][indexTransducer]) # number", "to store output csv - transducer; name of the used transducer - freq_TS:", "logging import pandas as pd import numpy as np import functools import os", "track_i.x.iloc[ j] > 0: # check if x is coherent (beam is oriented", "tracks_data['Nv'] = -999 # No Nv data provided # tracks movement analysis tracks_id", "each track. 
input: - path_pickle: path to a pickle file, output of movies", "- 1], deltas[4][j - 1])) # absolute (geographical) heading if cap_abs < 0:", "index_targets = [] for i in range(nb_target): index_targets += [i for j in", "value for each track path_Nv = path_output + '/' + name_transect + \"_Nv.csv\"", "= dist_range / delta_t sd_x = np.std(deltas[4]) # standard deviation sd_y = np.std(deltas[5])", "data...') tracks_data = targets_data.groupby('track').mean() # group targets by tracks, keep each parameters as", "tracks_id: # for each track track_i = targets_data.loc[ targets_data['track'] == i, ['timeTarget', 'x',", "and Time: mean time in ns since 1970 and in string formats -", "for all frequencies TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x", "deltas[2][j - 1])) - 90) # tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(cap_rel)", "NearestNeighbors import Sv import logging import pandas as pd import numpy as np", "math.atan2(deltas[1][j - 1], - deltas[0][j - 1]))) # heading relative to the boat", "- track_i.y.iloc[j - 1]) # delta in y axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j -", "for j in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) # delta in", "targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) # merge of all the", "= os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle) > 0: result = pd.read_pickle(path_pickle) # read the", "of the track vit_x = dist_x / delta_t # speed vit_y = dist_y", "key parameters for each track. 
input: - path_pickle: path to a pickle file,", "!\") ##### Tracks grouping and analysis logger.info('Gathering tracks data...') tracks_data = targets_data.groupby('track').mean() #", "dist = dlon * (40075000 * math.cos(lat) / 360) return dist def unit_vector(vector):", "track_i.sort_values('timeTarget') # Sort by time deltas = [[], [], [], [], [], [],", "orientation ('H' or 'V') outputs: multiple csv - tracks: matrix of tracks with:", "dist def unit_vector(vector): \"\"\" Returns the unit vector of the vector. \"\"\" return", "for the given sounder and transducer if nb_target > 0: # check if", "'y_gps', 'z_gps']] # get position of each tracks tracks['long_m'] = tracks.y_gps * (", "math.atan2(deltas[5][j - 1], deltas[4][j - 1])) # absolute (geographical) heading if cap_abs <", "analysis - path_output: path to store output csv - transducer; name of the", "for j in range(len(result[10][i])): if result[10][i][j] == transducer: indexSounder = i indexTransducer =", "Nv) else: tracks_data['Nv'] = -999 # No Nv data provided # tracks movement", "- lon1 dist = dlon * (40075000 * math.cos(lat) / 360) return dist", "+ \"_Nv.csv\" if os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv']", "return distances[:, 1] def conjunction(*conditions): \"\"\"Multiple conditions filter for panda\"\"\" return functools.reduce(np.logical_and, conditions)", "= point_processing(tracks_data) # Distance to closest neighbor for index, row in hac_info.iterrows(): #", "vit_range = dist_range / delta_t sd_x = np.std(deltas[4]) # standard deviation sd_y =", "- vit_x, vit_y, vit_z, vit_range: speed following different axis - sd_x, sd_y, sd_z,", "'dist_y', 'dist_z', 'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range', 'sd_x', 'sd_y',", "frequency from the reference # frequency freq_TS TS_freq = freq[freq.TSfreq == freq_TS] #", "of the track (s) dist_x = np.sum(deltas[4]) # dist is the 
length of", "# Extract the pickle data in several panda tables. nb_target = len(result[0][indexSounder][indexTransducer]) #", "tracks: matrix of tracks with: - track, target: relative and absolute index for", "( 40075000 * np.cos(tracks.x_gps) / 360) # Get the equivalent of the longitude", "'z', 'TSrange', 'x_gps', 'y_gps']] track_i = track_i.sort_values('timeTarget') # Sort by time deltas =", "track track_i = targets_data.loc[ targets_data['track'] == i, ['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps',", "}, index=range(nb_freq * nb_target) ) # get the position of each targets (relative", "le fichier Pickle est vide logger.info(\"done !\") for i in range(len(result[10])): # get", "= lon2 - lon1 dist = dlon * (40075000 * math.cos(lat) / 360)", "path_output + '/' + name_transect + \"_Nv.csv\" if os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv) tracks_data['Nv']", "= dist_y / delta_t vit_z = dist_z / delta_t vit_range = dist_range /", "deltas[6].append(tilt_angle) deltas[7].append(999) # relative and absolute heading is irrelevant on vertical echo sounder", "min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS)) # closest frequency from the reference #", "axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) # delta in y axis deltas[2].append(track_i.z.iloc[j] -", "filename_1 = path_output + \"/\" + name_transect + \"_tracks.csv\" filename_2 = path_output +", "+ sd_z scores.append( [i, dist_x / len(track_i), dist_y / len(track_i), dist_z / len(track_i),", "panda's datetime tracks_data['k_dist'] = point_processing(tracks_data) # Distance to closest neighbor for index, row", "and analysis logger.info('Gathering tracks data...') tracks_data = targets_data.groupby('track').mean() # group targets by tracks,", "a distance between 2 latitudes\"\"\" dlat = lat2 - lat1 dist = dlat", "to given transducer for j in range(len(result[10][i])): if result[10][i][j] == transducer: indexSounder =", "of standard deviation - targets: matrix of all targets - freq: mean 
TScomp", "path_output + \"/\" + name_transect + \"_freq.csv\" freq_data.to_csv(filename_3, index=False) else: logger.error(\"No targets !!!\")", "1970 and in string formats - k_dist: distance in m to the nearest", "'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca']) tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge with", "!\") for i in range(len(result[10])): # get index for the sounder and transducer", "'z_gps']) TS_means = freq.groupby(by=\"target\").mean() # get the TScomp_mean: mean TScomp for all frequencies", "# correct to have 0-360° headings tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] **", "# Si le fichier Pickle est vide logger.info(\"done !\") for i in range(len(result[10])):", "deltas[7].append(999) # relative and absolute heading is irrelevant on vertical echo sounder deltas[8].append(999)", "2 longitudes for a given latitude\"\"\" dlon = lon2 - lon1 dist =", "{ \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]),", "= row[header] tracks_data['b20'] = tracks_data['TScomp'] - ( 20 * np.log10(tracks_data['tailleMoyenne'])) # get the", "lon1 dist = dlon * (40075000 * math.cos(lat) / 360) return dist def", "and taille moyenne # get the Nv value for each track path_Nv =", "'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS)) # closest frequency from", "deltas[4][j - 1])) # absolute (geographical) heading if cap_abs < 0: cap_abs =", "2 + deltas[1][j - 1] ** 2), deltas[2][j - 1])) - 90) #", "tracks - timeInt and Time: mean time in ns since 1970 and in", "cap_rel, cap_abs: tilt or heading angle (absolute and relative) in degrees (according to", "(40075000 * math.cos(lat) / 360) return dist 
def unit_vector(vector): \"\"\" Returns the unit", "axis - sd_x, sd_y, sd_z, sd_range, sd_ta: standard deviation of previous displacement and", "sd_y, sd_z, sd_range, sd_ta: standard deviation of previous displacement and angle - sd_tot:", "the length of the track on several dimensions dist_y = np.sum(deltas[5]) dist_z =", "transducer according to given transducer for j in range(len(result[10][i])): if result[10][i][j] == transducer:", "target data targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by track length targets_data", "= pd.DataFrame( # TS and frequency data { \"target\": index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\":", "sd_z = np.std(deltas[2]) sd_range = np.std(deltas[3]) sd_ta = np.std(deltas[6]) sd_cr = np.std(deltas[7]) sd_ca", "of the track cap_abs = np.mean(deltas[8]) # mean absolute heading of the track", "# Get the equivalent of the latitude in meters array = np.vstack( [tracks.lat_m,", "freq_TS)) # closest frequency from the reference # frequency freq_TS TS_freq = freq[freq.TSfreq", "target: relative and absolute index for each tracks - TSrange: mean distance in", "get position of each tracks tracks['long_m'] = tracks.y_gps * ( 40075000 * np.cos(tracks.x_gps)", "= NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm distances, indices = nbrs.kneighbors(array) return distances[:,", "the track deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical echo sounder tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j", "tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1] **", "array = np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm distances, indices", "orient == 'H': #Horizontal echo sounder if track_i.x.iloc[ j] > 0: # check", "(s) dist_x = np.sum(deltas[4]) 
# dist is the length of the track on", "relative and absolute position - TScomp_mean, TScomp: mean TS of all frequencies or", "tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get position of each tracks tracks['long_m']", "logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\" input: tracking data matrix ouput: column of distances", "TScomp_mean: mean TScomp for all frequencies TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']),", "for the closest frequency from reference frequency - nb_target: number of targets per", "dlon * (40075000 * math.cos(lat) / 360) return dist def unit_vector(vector): \"\"\" Returns", "= lat2 - lat1 dist = dlat * 60 * 1852 return dist", "= [] for i in tracks_id: # for each track track_i = targets_data.loc[", "z axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) # delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j", "the TS detection and tracks selection - hac_info: complementary info on the different", "np.std(deltas[5]) sd_z = np.std(deltas[2]) sd_range = np.std(deltas[3]) sd_ta = np.std(deltas[6]) sd_cr = np.std(deltas[7])", "- 1]))) cap_abs = math.degrees( math.atan2(deltas[5][j - 1], deltas[4][j - 1])) # absolute", "# nearest neighbors algorithm distances, indices = nbrs.kneighbors(array) return distances[:, 1] def conjunction(*conditions):", "import NearestNeighbors import Sv import logging import pandas as pd import numpy as", "delta_t # speed vit_y = dist_y / delta_t vit_z = dist_z / delta_t", "nbrs.kneighbors(array) return distances[:, 1] def conjunction(*conditions): \"\"\"Multiple conditions filter for panda\"\"\" return functools.reduce(np.logical_and,", "tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index)) ) targets = pd.merge(targets, tracks_len, how='inner', on='track') # add", "[] for i in tracks_id: # for each track track_i = 
targets_data.loc[ targets_data['track']", "TScomp_mean, TScomp: mean TS of all frequencies or for the closest frequency from", "value - Nv: Nv value - dist_x, dist_y, dist_z, dist_range, dist_tot: mean displacement", "- path_pickle: path to a pickle file, output of movies TS analysis -", "vit_x = dist_x / delta_t # speed vit_y = dist_y / delta_t vit_z", "# tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(999) # relative and absolute heading", "/ len(track_i), dist_range, dist_tot, tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x, sd_y,", "frequency data { \"target\": index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq", "the track cap_abs = np.mean(deltas[8]) # mean absolute heading of the track vit_x", "mean angle in the transducer beam - x, y, z, x_gps, y_gps, z_gps:", "* 1852 return dist def calc_distance_long(lat, lon1, lon2): \"\"\"Returns a distance between 2", "= np.sum(deltas[4]) # dist is the length of the track on several dimensions", "Storing 2 different data frames as csv: # - targets, with individual targets", "of the track on several dimensions dist_y = np.sum(deltas[5]) dist_z = np.sum(deltas[2]) dist_range", "track_i.TSrange.iloc[j - 1]) # delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1])) # distance", "- targets, with individual targets of each points # - tracks, with the", "in degrees (according to orientation) - vit_x, vit_y, vit_z, vit_range: speed following different", "the position of each targets (relative and absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])),", "1], deltas[4][j - 1])) # absolute (geographical) heading if cap_abs < 0: cap_abs", "x, y, z, x_gps, y_gps, z_gps: relative and 
absolute position - TScomp_mean, TScomp:", "np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) ) freq = pd.DataFrame(", "main data frame logger.info(\"Done !\") logger.debug('Tracks summary :') logger.debug(str(tracks_data.describe())) # Storing 2 different", "and absolute position - TScomp_mean, TScomp: mean TS of all frequencies or for", "!\") targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) # merge of all", "# distance between the 2 longitudes if orient == 'H': #Horizontal echo sounder", "in meters tracks['lat_m'] = tracks.x_gps * 60 * 1852 # Get the equivalent", "in range(nb_freq)] targets = pd.DataFrame( # individual target data { \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\":", "np.sum(deltas[3]) dist_tot = dist_x + dist_y + dist_z tilt_angle = np.mean(deltas[6]) # mean", "sd_range, sd_tot, sd_ta, sd_cr, sd_ca] ) dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing values", "= delta_t.total_seconds() # time length of the track (s) dist_x = np.sum(deltas[4]) #", "each run - orient: orientation ('H' or 'V') outputs: multiple csv - tracks:", "delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1])) # distance between the 2 latitudes", "# relative and absolute heading is irrelevant on vertical echo sounder deltas[8].append(999) delta_t", "# standard deviation sd_y = np.std(deltas[5]) sd_z = np.std(deltas[2]) sd_range = np.std(deltas[3]) sd_ta", "dist_y / delta_t vit_z = dist_z / delta_t vit_range = dist_range / delta_t", "tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv'] = -999 # No Nv data provided", "for the given reference frequency TS_freq.index = range(len(TS_freq)) 
logger.info(\"done !\") targets = pd.concat([targets,", "returns several key parameters for each track. input: - path_pickle: path to a", "neighbors in meters \"\"\" tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get position", "saved !\") freq_data = freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index filename_3 = path_output + \"/\"", "freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index filename_3 = path_output + \"/\" + name_transect + \"_freq.csv\"", "- track_i.x.iloc[j - 1]) # delta in x axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j -", "= tracks.y_gps * ( 40075000 * np.cos(tracks.x_gps) / 360) # Get the equivalent", "range(len(result[10][i])): if result[10][i][j] == transducer: indexSounder = i indexTransducer = j logger.info(\"creating tables...\")", "- ( 20 * np.log10(tracks_data['tailleMoyenne'])) # get the b20 from TScomp and taille", "'z_gps']] # get position of each tracks tracks['long_m'] = tracks.y_gps * ( 40075000", "# - tracks, with the run track data filename_1 = path_output + \"/\"", "of each tracks tracks['long_m'] = tracks.y_gps * ( 40075000 * np.cos(tracks.x_gps) / 360)", "vide logger.info(\"done !\") for i in range(len(result[10])): # get index for the sounder", "1970) logger.info(\"targets ready !\") ##### Tracks grouping and analysis logger.info('Gathering tracks data...') tracks_data", "sum of standard deviation - targets: matrix of all targets - freq: mean", "transducer if nb_target > 0: # check if any targets nb_freq = int(len(result[9][indexSounder][indexTransducer])", "= abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1]))) cap_abs = math.degrees( math.atan2(deltas[5][j - 1],", "vit_x, vit_y, vit_z, vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot, sd_ta, sd_cr, sd_ca] )", "unit vector of the vector. \"\"\" return vector / np.linalg.norm(vector) def pickle_processing(path_pickle, path_output,", "data in several panda tables. 
nb_target = len(result[0][indexSounder][indexTransducer]) # number of targets for", "# for each track track_i = targets_data.loc[ targets_data['track'] == i, ['timeTarget', 'x', 'y',", "deltas[0][j - 1]))) # heading relative to the boat else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j", "sd_ta: standard deviation of previous displacement and angle - sd_tot: sum of standard", "'/' + name_transect + \"_Nv.csv\" if os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data,", "each frequency \"\"\" if path_pickle[-7:] != \".pickle\": # Check the pickle file logger.error(\"Not", "- 1]) # delta in y axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) #", "nearest neighbors algorithm array = np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors", "/ nb_target) index_targets = [] for i in range(nb_target): index_targets += [i for", "tracks_len, how='inner', on='track') # add the track length to the target data targets_selected", "array for nearest neighbors algorithm array = np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) #", "TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS)) # closest frequency", "closest frequency from reference frequency - nb_target: number of targets per tracks -", "the different runs, same for all tracks of each run - orient: orientation", "reference frequency TS_freq.index = range(len(TS_freq)) logger.info(\"done !\") targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'],", "90) # tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(999) # relative and absolute", "mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime tracks_data['k_dist'] = point_processing(tracks_data) # Distance to", "= tracks_data['TScomp'] - ( 20 * 
np.log10(tracks_data['tailleMoyenne'])) # get the b20 from TScomp", "path_Nv = path_output + '/' + name_transect + \"_Nv.csv\" if os.path.exists(path_Nv): Nv =", "pd.DataFrame( # individual target data { \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\":", "data filename_1 = path_output + \"/\" + name_transect + \"_tracks.csv\" filename_2 = path_output", "= targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) # convert time to int (ns,", "frequency from reference frequency - nb_target: number of targets per tracks - timeInt", "np import functools import os import math logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data):", "of the used transducer - freq_TS: reference frequence for TS extraction - TS_parameters:", "z_gps: relative and absolute position - TScomp_mean, TScomp: mean TS of all frequencies", "from the reference # frequency freq_TS TS_freq = freq[freq.TSfreq == freq_TS] # get", "name of the used transducer - freq_TS: reference frequence for TS extraction -", "targets = pd.DataFrame( # individual target data { \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\":", "number of targets per tracks - timeInt and Time: mean time in ns", "pickle file !\") return name_transect = os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle) > 0: result", "TS extraction - TS_parameters: parameter for the TS detection and tracks selection -", "= path_output + \"/\" + name_transect + \"_freq.csv\" freq_data.to_csv(filename_3, index=False) else: logger.error(\"No targets", "j in range(len(result[10][i])): if result[10][i][j] == transducer: indexSounder = i indexTransducer = j", "mean TS of all frequencies or for the closest frequency from reference 
frequency", "'sd_cr', 'sd_ca']) tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge with the main", "given sounder and transducer if nb_target > 0: # check if any targets", "a given latitude\"\"\" dlon = lon2 - lon1 dist = dlon * (40075000", "else: tracks_data['Nv'] = -999 # No Nv data provided # tracks movement analysis", "relative and absolute index for each tracks - TSrange: mean distance in m", "= freq.groupby(by=\"target\").mean() # get the TScomp_mean: mean TScomp for all frequencies TS_means =", "track length to the target data targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select", "# Storing 2 different data frames as csv: # - targets, with individual", "np.sum(deltas[2]) dist_range = np.sum(deltas[3]) dist_tot = dist_x + dist_y + dist_z tilt_angle =", "by track length targets_data = targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) # convert", "return functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1, lat2): \"\"\"Returns a distance between 2 latitudes\"\"\" dlat", "Returns the unit vector of the vector. 
\"\"\" return vector / np.linalg.norm(vector) def", "if path_pickle[-7:] != \".pickle\": # Check the pickle file logger.error(\"Not a pickle file", "from sklearn.neighbors import NearestNeighbors import Sv import logging import pandas as pd import", "# TS and frequency data { \"target\": index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\":", "starboard), corrects direction # accordingly cap_rel = abs(math.degrees( math.atan2(deltas[1][j - 1], - deltas[0][j", "values as a panda data frame columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot', 'tilt_angle',", "fichier Pickle est vide logger.info(\"done !\") for i in range(len(result[10])): # get index", "with individual targets of each points # - tracks, with the run track", "matrix ouput: column of distances to nearest neighbors in meters \"\"\" tracks =", "- timeInt and Time: mean time in ns since 1970 and in string", "track length targets_data = targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) # convert time", "relative) in degrees (according to orientation) - vit_x, vit_y, vit_z, vit_range: speed following", "speed vit_y = dist_y / delta_t vit_z = dist_z / delta_t vit_range =", "= list(targets_data.groupby('track').groups) scores = [] for i in tracks_id: # for each track", "# individual target data { \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]),", "each targets (relative and absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z'])", "orient): \"\"\" Process the pickle file from pymovies tracking and returns several key", "('H' or 'V') outputs: 
multiple csv - tracks: matrix of tracks with: -", "all the data tracks = targets.groupby(by=\"track\").target.agg('count') # get number of target per tracks", "dist_x = np.sum(deltas[4]) # dist is the length of the track on several", "= dist_x / delta_t # speed vit_y = dist_y / delta_t vit_z =", "sd_y + sd_z scores.append( [i, dist_x / len(track_i), dist_y / len(track_i), dist_z /", "the track cap_rel = np.mean(deltas[7]) # mean relative heading of the track cap_abs", "# No Nv data provided # tracks movement analysis tracks_id = list(targets_data.groupby('track').groups) scores", "indices = nbrs.kneighbors(array) return distances[:, 1] def conjunction(*conditions): \"\"\"Multiple conditions filter for panda\"\"\"", "cap_abs = math.degrees( math.atan2(deltas[5][j - 1], deltas[4][j - 1])) # absolute (geographical) heading", "- TS_parameters: parameter for the TS detection and tracks selection - hac_info: complementary", "1] def conjunction(*conditions): \"\"\"Multiple conditions filter for panda\"\"\" return functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1,", "\"\"\"Returns a distance between 2 longitudes for a given latitude\"\"\" dlon = lon2", "or heading angle (absolute and relative) in degrees (according to orientation) - vit_x,", "'sd_x', 'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca']) tracks_data = pd.merge(tracks_data, dist_scores, how='inner',", "a pickle file, output of movies TS analysis - path_output: path to store", "each track path_Nv = path_output + '/' + name_transect + \"_Nv.csv\" if os.path.exists(path_Nv):", "dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing values as a panda data frame columns=['track',", "the unit vector of the vector. 
\"\"\" return vector / np.linalg.norm(vector) def pickle_processing(path_pickle,", "of each run - orient: orientation ('H' or 'V') outputs: multiple csv -", "- freq: mean TScomp for each frequency \"\"\" if path_pickle[-7:] != \".pickle\": #", "TS analysis - path_output: path to store output csv - transducer; name of", "> 0: # check if any targets nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets", "key=lambda x: abs(x - freq_TS)) # closest frequency from the reference # frequency", "lon2): \"\"\"Returns a distance between 2 longitudes for a given latitude\"\"\" dlon =", "# Distance to closest neighbor for index, row in hac_info.iterrows(): # add the", "TScomp and taille moyenne # get the Nv value for each track path_Nv", "logger.info(\"creating tables...\") # Extract the pickle data in several panda tables. nb_target =", ") # get the position of each targets (relative and absolute) position =", "= tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get position of each tracks tracks['long_m'] =", "import logging import pandas as pd import numpy as np import functools import", "closest neighbor for index, row in hac_info.iterrows(): # add the hac_info columns (same", "b20 value - Nv: Nv value - dist_x, dist_y, dist_z, dist_range, dist_tot: mean", "tracks - TSrange: mean distance in m to transducer - TSalong, TSarthwart: mean", "= path_output + \"/\" + name_transect + \"_tracks.csv\" filename_2 = path_output + \"/\"", "TS_freq.index = range(len(TS_freq)) logger.info(\"done !\") targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1)", "dist def calc_distance_long(lat, lon1, lon2): \"\"\"Returns a distance between 2 longitudes for a", "if cap_abs < 0: cap_abs = 360 + cap_abs # correct to have", "in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1])) # distance between the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j],", 
"( 20 * np.log10(tracks_data['tailleMoyenne'])) # get the b20 from TScomp and taille moyenne", "+ name_transect + \"_tracks.csv\" filename_2 = path_output + \"/\" + name_transect + \"_targets.csv\"", "#Horizontal echo sounder if track_i.x.iloc[ j] > 0: # check if x is", "the hac info file - b20: b20 value - Nv: Nv value -", "movies TS analysis - path_output: path to store output csv - transducer; name", "index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq * nb_target) ) #", "filename_3 = path_output + \"/\" + name_transect + \"_freq.csv\" freq_data.to_csv(filename_3, index=False) else: logger.error(\"No", "os import math logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\" input: tracking data", "# Get the equivalent of the longitude in meters tracks['lat_m'] = tracks.x_gps *", "np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) ) freq = pd.DataFrame( # TS and frequency data {", "# delta in x axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) # delta in", "# mean absolute heading of the track vit_x = dist_x / delta_t #", "logger.debug('Tracks summary :') logger.debug(str(tracks_data.describe())) # Storing 2 different data frames as csv: #", "= dlon * (40075000 * math.cos(lat) / 360) return dist def unit_vector(vector): \"\"\"", "the closest frequency from reference frequency - nb_target: number of targets per tracks", "0-360° headings tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j -", "\"\"\" input: tracking data matrix ouput: column of distances to nearest neighbors in", "have 0-360° headings tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j", "= path_output + \"/\" + name_transect + \"_targets.csv\" 
tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files", "freq_data = freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index filename_3 = path_output + \"/\" + name_transect", "in ns since 1970 and in string formats - k_dist: distance in m", "the track vit_x = dist_x / delta_t # speed vit_y = dist_y /", "path_output, transducer, freq_TS, TS_parameters, hac_info, orient): \"\"\" Process the pickle file from pymovies", "path_output: path to store output csv - transducer; name of the used transducer", "the run track data filename_1 = path_output + \"/\" + name_transect + \"_tracks.csv\"", "- sd_x, sd_y, sd_z, sd_range, sd_ta: standard deviation of previous displacement and angle", "to the boat else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1]))) cap_abs", "== name_transect: for header in hac_info.columns[1:]: tracks_data[header] = row[header] tracks_data['b20'] = tracks_data['TScomp'] -", "of all targets - freq: mean TScomp for each frequency \"\"\" if path_pickle[-7:]", "= pd.DataFrame( # individual target data { \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]),", "+ \"_targets.csv\" tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files saved !\") freq_data = freq.groupby('TSfreq').mean() freq_data['freq']", "equivalent of the longitude in meters tracks['lat_m'] = tracks.x_gps * 60 * 1852", "frequency - nb_target: number of targets per tracks - timeInt and Time: mean", "j in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) # delta in x", "- freq_TS: reference frequence for TS extraction - TS_parameters: parameter for the TS", "import pandas as pd import numpy as np import functools import os import", "tilt angle of the track deltas[6].append(tilt_angle) 
deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical echo sounder tilt_angle", "targets_data = targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) # convert time to int", "in meters \"\"\" tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get position of", "or for the closest frequency from reference frequency - nb_target: number of targets", "the latitude in meters array = np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the", "np.mean(deltas[6]) # mean tilt angle of the track cap_rel = np.mean(deltas[7]) # mean", "'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range', 'sd_x',", "to orientation) - vit_x, vit_y, vit_z, vit_range: speed following different axis - sd_x,", "abs(x - freq_TS)) # closest frequency from the reference # frequency freq_TS TS_freq", "track path_Nv = path_output + '/' + name_transect + \"_Nv.csv\" if os.path.exists(path_Nv): Nv", "['x_gps', 'y_gps', 'z_gps']] # get position of each tracks tracks['long_m'] = tracks.y_gps *", "indexTransducer = j logger.info(\"creating tables...\") # Extract the pickle data in several panda", "NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm distances, indices = nbrs.kneighbors(array) return distances[:, 1]", "get the TScomp for the given reference frequency TS_freq.index = range(len(TS_freq)) logger.info(\"done !\")", "# check if any targets nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets = []", "of tracks with: - track, target: relative and absolute index for each tracks", "name_transect = os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle) > 0: result = pd.read_pickle(path_pickle) # read", "1]) # delta in y axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) # delta", "= 
np.sum(deltas[2]) dist_range = np.sum(deltas[3]) dist_tot = dist_x + dist_y + dist_z tilt_angle", "= min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS)) # closest frequency from the reference", "group targets by tracks, keep each parameters as mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) #", "columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range',", "the TScomp for the given reference frequency TS_freq.index = range(len(TS_freq)) logger.info(\"done !\") targets", "= targets_data.loc[ targets_data['track'] == i, ['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']] track_i", "ouput: column of distances to nearest neighbors in meters \"\"\" tracks = tracks_data.loc[:,", "latitude\"\"\" dlon = lon2 - lon1 dist = dlon * (40075000 * math.cos(lat)", "the nearest neighbour - State, Abrv, tailleMoyenne: variables from the hac info file", "distances[:, 1] def conjunction(*conditions): \"\"\"Multiple conditions filter for panda\"\"\" return functools.reduce(np.logical_and, conditions) def", "\"/\" + name_transect + \"_targets.csv\" tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files saved !\") freq_data", "Nv value for each track path_Nv = path_output + '/' + name_transect +", "/ 360) # Get the equivalent of the longitude in meters tracks['lat_m'] =", "if x is coherent (beam is oriented on starboard), corrects direction # accordingly", "and absolute heading is irrelevant on vertical echo sounder deltas[8].append(999) delta_t = track_i.timeTarget.iloc[len(track_i)", "range(nb_freq)] targets = pd.DataFrame( # individual target data { \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target),", "time length of the track (s) dist_x = np.sum(deltas[4]) # dist is the", "vit_y, vit_z, vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot, sd_ta, sd_cr, sd_ca] ) dist_scores", 
"targets for the given sounder and transducer if nb_target > 0: # check", "frequency \"\"\" if path_pickle[-7:] != \".pickle\": # Check the pickle file logger.error(\"Not a", "per tracks tracks_len = pd.DataFrame( {'track': tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index)) ) targets =", "range(len(TS_freq)) logger.info(\"done !\") targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) # merge", "matrix of all targets - freq: mean TScomp for each frequency \"\"\" if", "tracks selection - hac_info: complementary info on the different runs, same for all", "in x axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) # delta in y axis", "targets_data.loc[ targets_data['track'] == i, ['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']] track_i =", "['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']] track_i = track_i.sort_values('timeTarget') # Sort by", "sd_x, sd_y, sd_z, sd_range, sd_ta: standard deviation of previous displacement and angle -", "a distance between 2 longitudes for a given latitude\"\"\" dlon = lon2 -", "index for each tracks - TSrange: mean distance in m to transducer -", "distance between the 2 longitudes if orient == 'H': #Horizontal echo sounder if", "vit_z, vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot, sd_ta, sd_cr, sd_ca] ) dist_scores =", "pd.to_datetime(tracks_data['timeInt']) # panda's datetime tracks_data['k_dist'] = point_processing(tracks_data) # Distance to closest neighbor for", "to the target data targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by track", "- 1] - track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds() # time length of the track", "cap_abs = np.mean(deltas[8]) # mean absolute heading of the track vit_x = dist_x", "freq_TS] # get the TScomp for the given reference frequency TS_freq.index = range(len(TS_freq))", "# mean tilt angle of the track cap_rel = np.mean(deltas[7]) # 
mean relative", "input: - path_pickle: path to a pickle file, output of movies TS analysis", "outputs: multiple csv - tracks: matrix of tracks with: - track, target: relative", "and angle - sd_tot: sum of standard deviation - targets: matrix of all", "track_i = track_i.sort_values('timeTarget') # Sort by time deltas = [[], [], [], [],", "+ dist_y + dist_z tilt_angle = np.mean(deltas[6]) # mean tilt angle of the", "0: # check if x is coherent (beam is oriented on starboard), corrects", "len(track_i), dist_z / len(track_i), dist_range, dist_tot, tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range,", "matrix of tracks with: - track, target: relative and absolute index for each", "track_i.y.iloc[j - 1]) # delta in y axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1])", "position of each tracks tracks['long_m'] = tracks.y_gps * ( 40075000 * np.cos(tracks.x_gps) /", "target data { \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]),", "[], [], [], []] for j in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j -", "calc_distance_lat(lat1, lat2): \"\"\"Returns a distance between 2 latitudes\"\"\" dlat = lat2 - lat1", "freq_TS: reference frequence for TS extraction - TS_parameters: parameter for the TS detection", "def calc_distance_lat(lat1, lat2): \"\"\"Returns a distance between 2 latitudes\"\"\" dlat = lat2 -", "'sd_ca']) tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge with the main data", "tilt_angle, cap_rel, cap_abs: tilt or heading angle (absolute and relative) in degrees (according", "degrees (according to orientation) - vit_x, vit_y, vit_z, vit_range: speed following different axis", "i, ['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']] 
track_i = track_i.sort_values('timeTarget') # Sort", "tracks_data[header] = row[header] tracks_data['b20'] = tracks_data['TScomp'] - ( 20 * np.log10(tracks_data['tailleMoyenne'])) # get", "in range(len(result[10])): # get index for the sounder and transducer according to given", "transducer beam - x, y, z, x_gps, y_gps, z_gps: relative and absolute position", "file logger.error(\"Not a pickle file !\") return name_transect = os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle)", "given reference frequency TS_freq.index = range(len(TS_freq)) logger.info(\"done !\") targets = pd.concat([targets, position, positionGPS,", "of all the data tracks = targets.groupby(by=\"track\").target.agg('count') # get number of target per", "Process the pickle file from pymovies tracking and returns several key parameters for", "data frame logger.info(\"Done !\") logger.debug('Tracks summary :') logger.debug(str(tracks_data.describe())) # Storing 2 different data", "the boat else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1]))) cap_abs =", "= np.mean(deltas[6]) # mean tilt angle of the track cap_rel = np.mean(deltas[7]) #", "* ( 40075000 * np.cos(tracks.x_gps) / 360) # Get the equivalent of the", "closest frequency from the reference # frequency freq_TS TS_freq = freq[freq.TSfreq == freq_TS]", "sd_range = np.std(deltas[3]) sd_ta = np.std(deltas[6]) sd_cr = np.std(deltas[7]) sd_ca = np.std(deltas[8]) sd_tot", "moyenne # get the Nv value for each track path_Nv = path_output +", "standard deviation - targets: matrix of all targets - freq: mean TScomp for", "hac_info columns (same for each run) if row.Name == name_transect: for header in", "[tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the array for nearest neighbors algorithm array =", "- nb_target: number of targets per tracks - timeInt and Time: mean time", "length of the track (s) dist_x = np.sum(deltas[4]) # dist is the length", "TScomp: mean TS of all 
frequencies or for the closest frequency from reference", "different axis - tilt_angle, cap_rel, cap_abs: tilt or heading angle (absolute and relative)", "panda data frame columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x',", "of all frequencies or for the closest frequency from reference frequency - nb_target:", "tilt or heading angle (absolute and relative) in degrees (according to orientation) -", "\"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq * nb_target) ) # get the position", "by tracks, keep each parameters as mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime", "1])) # absolute (geographical) heading if cap_abs < 0: cap_abs = 360 +", "deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) # delta in x axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j", "np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq * nb_target) ) # get the position of", "on starboard), corrects direction # accordingly cap_rel = abs(math.degrees( math.atan2(deltas[1][j - 1], -", "+ \"/\" + name_transect + \"_tracks.csv\" filename_2 = path_output + \"/\" + name_transect", "the vector. 
\"\"\" return vector / np.linalg.norm(vector) def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters,", "= freq[freq.TSfreq == freq_TS] # get the TScomp for the given reference frequency", "if any targets nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets = [] for i", "int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets = [] for i in range(nb_target): index_targets += [i", "# heading relative to the boat else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j", "vit_x, vit_y, vit_z, vit_range: speed following different axis - sd_x, sd_y, sd_z, sd_range,", "math.degrees( math.atan2(deltas[5][j - 1], deltas[4][j - 1])) # absolute (geographical) heading if cap_abs", "= np.mean(deltas[7]) # mean relative heading of the track cap_abs = np.mean(deltas[8]) #", "file, output of movies TS analysis - path_output: path to store output csv", "echo sounder tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j -", "each tracks - TSrange: mean distance in m to transducer - TSalong, TSarthwart:", "sd_y = np.std(deltas[5]) sd_z = np.std(deltas[2]) sd_range = np.std(deltas[3]) sd_ta = np.std(deltas[6]) sd_cr", "columns=['x_gps', 'y_gps', 'z_gps']) TS_means = freq.groupby(by=\"target\").mean() # get the TScomp_mean: mean TScomp for", "- 1])) # absolute (geographical) heading if cap_abs < 0: cap_abs = 360", "sd_z, sd_range, sd_tot, sd_ta, sd_cr, sd_ca] ) dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing", "given latitude\"\"\" dlon = lon2 - lon1 dist = dlon * (40075000 *", "pickle file from pymovies tracking and returns several key parameters for each track.", "40075000 * np.cos(tracks.x_gps) / 360) # Get the equivalent of the longitude in", "+ name_transect + \"_Nv.csv\" if os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv)", "parameter for the TS detection and tracks selection - 
hac_info: complementary info on", "dist_tot: mean displacement in m following different axis - tilt_angle, cap_rel, cap_abs: tilt", "'y', 'z', 'TSrange', 'x_gps', 'y_gps']] track_i = track_i.sort_values('timeTarget') # Sort by time deltas", "\"\"\" Process the pickle file from pymovies tracking and returns several key parameters", "absolute index for each tracks - TSrange: mean distance in m to transducer", "1]) # delta in x axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) # delta", "run track data filename_1 = path_output + \"/\" + name_transect + \"_tracks.csv\" filename_2", "'dist_z', 'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range', 'sd_x', 'sd_y', 'sd_z',", "- TSalong, TSarthwart: mean angle in the transducer beam - x, y, z,", "= pd.merge(targets, tracks_len, how='inner', on='track') # add the track length to the target", "time to int (ns, 1970) logger.info(\"targets ready !\") ##### Tracks grouping and analysis", "all frequencies or for the closest frequency from reference frequency - nb_target: number", "lon2 - lon1 dist = dlon * (40075000 * math.cos(lat) / 360) return", "os.path.getsize(path_pickle) > 0: result = pd.read_pickle(path_pickle) # read the pickle file else: logger.error(\"File", "= freq_data.index filename_3 = path_output + \"/\" + name_transect + \"_freq.csv\" freq_data.to_csv(filename_3, index=False)", "np.std(deltas[3]) sd_ta = np.std(deltas[6]) sd_cr = np.std(deltas[7]) sd_ca = np.std(deltas[8]) sd_tot = sd_x", "= tracks.x_gps * 60 * 1852 # Get the equivalent of the latitude", "read the pickle file else: logger.error(\"File empty !\") # Si le fichier Pickle", "sounder and transducer according to given transducer for j in range(len(result[10][i])): if result[10][i][j]", "- track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds() # time length of the track (s) dist_x", "movement analysis tracks_id = list(targets_data.groupby('track').groups) scores = [] for i in tracks_id: 
#", "sd_ta, sd_cr, sd_ca] ) dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing values as a", "# storing values as a panda data frame columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range',", "- 1] ** 2), deltas[2][j - 1])) - 90) # tilt angle of", "- 1]))) # heading relative to the boat else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j -", "TScomp for the given reference frequency TS_freq.index = range(len(TS_freq)) logger.info(\"done !\") targets =", "each parameters as mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime tracks_data['k_dist'] = point_processing(tracks_data)", "= np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm distances, indices =", "1], deltas[0][j - 1]))) cap_abs = math.degrees( math.atan2(deltas[5][j - 1], deltas[4][j - 1]))", "coherent (beam is oriented on starboard), corrects direction # accordingly cap_rel = abs(math.degrees(", "several dimensions dist_y = np.sum(deltas[5]) dist_z = np.sum(deltas[2]) dist_range = np.sum(deltas[3]) dist_tot =", "store output csv - transducer; name of the used transducer - freq_TS: reference", "x axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) # delta in y axis deltas[2].append(track_i.z.iloc[j]", "range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1])) # distance between the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j],", "the track length to the target data targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] #", "path to a pickle file, output of movies TS analysis - path_output: path", "= TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS)) # closest", "pd.DataFrame( # TS and frequency data { \"target\": index_targets, \"TScomp\": 
np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]),", "functools import os import math logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\" input:", "\"_targets.csv\" tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files saved !\") freq_data = freq.groupby('TSfreq').mean() freq_data['freq'] =", "cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1]))) cap_abs = math.degrees( math.atan2(deltas[5][j -", "* math.cos(lat) / 360) return dist def unit_vector(vector): \"\"\" Returns the unit vector", "keep each parameters as mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime tracks_data['k_dist'] =", "cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot, sd_ta, sd_cr, sd_ca]", "Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv'] = -999 # No Nv data provided # tracks", "multiple csv - tracks: matrix of tracks with: - track, target: relative and", "dist_y, dist_z, dist_range, dist_tot: mean displacement in m following different axis - tilt_angle,", "is oriented on starboard), corrects direction # accordingly cap_rel = abs(math.degrees( math.atan2(deltas[1][j -", "pd.DataFrame(scores, index=range(len(scores)), # storing values as a panda data frame columns=['track', 'dist_x', 'dist_y',", "np.mean(deltas[8]) # mean absolute heading of the track vit_x = dist_x / delta_t", "any targets nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets = [] for i in", "the pickle file else: logger.error(\"File empty !\") # Si le fichier Pickle est", "echo sounder deltas[8].append(999) delta_t = track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds()", "tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot, sd_ta,", "+ 
\"/\" + name_transect + \"_targets.csv\" tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files saved !\")", "scores = [] for i in tracks_id: # for each track track_i =", "[], [], [], [], [], []] for j in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] -", "# dist is the length of the track on several dimensions dist_y =", "= pd.to_datetime(tracks_data['timeInt']) # panda's datetime tracks_data['k_dist'] = point_processing(tracks_data) # Distance to closest neighbor", "for each tracks - TSrange: mean distance in m to transducer - TSalong,", "angle of the track deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical echo sounder tilt_angle =", "Nv value - dist_x, dist_y, dist_z, dist_range, dist_tot: mean displacement in m following", "the transducer beam - x, y, z, x_gps, y_gps, z_gps: relative and absolute", "!\") # Si le fichier Pickle est vide logger.info(\"done !\") for i in", "to a pickle file, output of movies TS analysis - path_output: path to", "data frame columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y',", "Time: mean time in ns since 1970 and in string formats - k_dist:", "freq_data['freq'] = freq_data.index filename_3 = path_output + \"/\" + name_transect + \"_freq.csv\" freq_data.to_csv(filename_3,", "targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by track length targets_data = targets_selected.sort_values('track')", "m following different axis - tilt_angle, cap_rel, cap_abs: tilt or heading angle (absolute", "/ 360) return dist def unit_vector(vector): \"\"\" Returns the unit vector of the", "# Check the pickle file logger.error(\"Not a pickle file !\") return name_transect =", "of target per tracks tracks_len = pd.DataFrame( {'track': tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index)) )", 
"index=range(nb_freq * nb_target) ) # get the position of each targets (relative and", "* 60 * 1852 return dist def calc_distance_long(lat, lon1, lon2): \"\"\"Returns a distance", "distance between 2 latitudes\"\"\" dlat = lat2 - lat1 dist = dlat *", "the used transducer - freq_TS: reference frequence for TS extraction - TS_parameters: parameter", "in range(len(result[10][i])): if result[10][i][j] == transducer: indexSounder = i indexTransducer = j logger.info(\"creating", "targets, with individual targets of each points # - tracks, with the run", "logger.debug(str(tracks_data.describe())) # Storing 2 different data frames as csv: # - targets, with", "nb_target) ) # get the position of each targets (relative and absolute) position", "- 1], deltas[0][j - 1]))) cap_abs = math.degrees( math.atan2(deltas[5][j - 1], deltas[4][j -", "of movies TS analysis - path_output: path to store output csv - transducer;", "panda tables. nb_target = len(result[0][indexSounder][indexTransducer]) # number of targets for the given sounder", "displacement in m following different axis - tilt_angle, cap_rel, cap_abs: tilt or heading", "# tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical echo sounder", "tracks_len = pd.DataFrame( {'track': tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index)) ) targets = pd.merge(targets, tracks_len,", "heading is irrelevant on vertical echo sounder deltas[8].append(999) delta_t = track_i.timeTarget.iloc[len(track_i) - 1]", "parameters as mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime tracks_data['k_dist'] = point_processing(tracks_data) #", "vector. 
\"\"\" return vector / np.linalg.norm(vector) def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info,", "* 1852 # Get the equivalent of the latitude in meters array =", "check if x is coherent (beam is oriented on starboard), corrects direction #", "cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot, sd_ta, sd_cr,", "os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle) > 0: result = pd.read_pickle(path_pickle) # read the pickle", "\"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) ) freq = pd.DataFrame( # TS and", "deltas[8].append(cap_abs) else: #vertical echo sounder tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2", "nearest neighbour - State, Abrv, tailleMoyenne: variables from the hac info file -", "timeInt and Time: mean time in ns since 1970 and in string formats", "logger.info(\"files saved !\") freq_data = freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index filename_3 = path_output +", "value - dist_x, dist_y, dist_z, dist_range, dist_tot: mean displacement in m following different", "from TScomp and taille moyenne # get the Nv value for each track", "for header in hac_info.columns[1:]: tracks_data[header] = row[header] tracks_data['b20'] = tracks_data['TScomp'] - ( 20", "deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical echo sounder tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1]", "from reference frequency - nb_target: number of targets per tracks - timeInt and", "2 different data frames as csv: # - targets, with individual targets of", ") targets = pd.merge(targets, tracks_len, how='inner', on='track') # add the track length to", "track (s) dist_x = np.sum(deltas[4]) # dist is the length of the track", "1] ** 2), deltas[2][j - 1])) - 
90) # tilt angle of the", "file - b20: b20 value - Nv: Nv value - dist_x, dist_y, dist_z,", "- k_dist: distance in m to the nearest neighbour - State, Abrv, tailleMoyenne:", "freq[freq.TSfreq == freq_TS] # get the TScomp for the given reference frequency TS_freq.index", "[]] for j in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) # delta", "pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps']) TS_means = freq.groupby(by=\"target\").mean() # get the TScomp_mean:", "track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1])) # distance between the 2 longitudes if orient ==", "as a panda data frame columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel',", "!= \".pickle\": # Check the pickle file logger.error(\"Not a pickle file !\") return", "1], - deltas[0][j - 1]))) # heading relative to the boat else: cap_rel", "relative heading of the track cap_abs = np.mean(deltas[8]) # mean absolute heading of", "2), deltas[2][j - 1])) - 90) # tilt angle of the track deltas[6].append(tilt_angle)", "nb_target) index_targets = [] for i in range(nb_target): index_targets += [i for j", "+ '/' + name_transect + \"_Nv.csv\" if os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv) tracks_data['Nv'] =", "abs(math.degrees( math.atan2(deltas[1][j - 1], - deltas[0][j - 1]))) # heading relative to the", "[i for j in range(nb_freq)] targets = pd.DataFrame( # individual target data {", "\"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq * nb_target) ) # get", "lat2): \"\"\"Returns a distance between 2 latitudes\"\"\" dlat = lat2 - lat1 dist", "/ delta_t # speed vit_y = dist_y / delta_t vit_z = dist_z /", "mean tilt angle of the track cap_rel = np.mean(deltas[7]) # 
mean relative heading", "1]))) cap_abs = math.degrees( math.atan2(deltas[5][j - 1], deltas[4][j - 1])) # absolute (geographical)", "1852 # Get the equivalent of the latitude in meters array = np.vstack(", "Nv: Nv value - dist_x, dist_y, dist_z, dist_range, dist_tot: mean displacement in m", "the longitude in meters tracks['lat_m'] = tracks.x_gps * 60 * 1852 # Get", "and frequency data { \"target\": index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), },", "targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by track length targets_data = targets_selected.sort_values('track') targets_data['timeInt'] =", "in y axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) # delta in z axis", "- deltas[0][j - 1]))) # heading relative to the boat else: cap_rel =", "vector / np.linalg.norm(vector) def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info, orient): \"\"\" Process", "# frequency freq_TS TS_freq = freq[freq.TSfreq == freq_TS] # get the TScomp for", "targets_data['timeTarget'].apply(lambda x: x.value) # convert time to int (ns, 1970) logger.info(\"targets ready !\")", "= sd_x + sd_y + sd_z scores.append( [i, dist_x / len(track_i), dist_y /", "absolute heading of the track vit_x = dist_x / delta_t # speed vit_y", "# panda's datetime tracks_data['k_dist'] = point_processing(tracks_data) # Distance to closest neighbor for index,", "the data tracks = targets.groupby(by=\"track\").target.agg('count') # get number of target per tracks tracks_len", "# get the Nv value for each track path_Nv = path_output + '/'", "cap_abs: tilt or heading angle (absolute and relative) in degrees (according to orientation)", "longitude in meters tracks['lat_m'] = tracks.x_gps * 60 * 1852 # Get the", "np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": 
np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq * nb_target) ) # get the", "distance between the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1])) # distance between", "longitudes for a given latitude\"\"\" dlon = lon2 - lon1 dist = dlon", "by time deltas = [[], [], [], [], [], [], [], [], []]", "of the track cap_rel = np.mean(deltas[7]) # mean relative heading of the track", "to closest neighbor for index, row in hac_info.iterrows(): # add the hac_info columns", "nb_target = len(result[0][indexSounder][indexTransducer]) # number of targets for the given sounder and transducer", "else: #vertical echo sounder tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 +", "1])) # distance between the 2 longitudes if orient == 'H': #Horizontal echo", "# preparing the array for nearest neighbors algorithm array = np.transpose(array) nbrs =", "\"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), },", "from the hac info file - b20: b20 value - Nv: Nv value", "mean time in ns since 1970 and in string formats - k_dist: distance", "heading of the track vit_x = dist_x / delta_t # speed vit_y =", "'H': #Horizontal echo sounder if track_i.x.iloc[ j] > 0: # check if x", "- dist_x, dist_y, dist_z, dist_range, dist_tot: mean displacement in m following different axis", "deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) # delta in z axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j", "import numpy as np import functools import os import math logger = logging.getLogger('marin')", 
"tilt angle of the track cap_rel = np.mean(deltas[7]) # mean relative heading of", "len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps']) TS_means", "delta_t.total_seconds() # time length of the track (s) dist_x = np.sum(deltas[4]) # dist", "- track_i.TSrange.iloc[j - 1]) # delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1])) #", "data tracks = targets.groupby(by=\"track\").target.agg('count') # get number of target per tracks tracks_len =", "len(track_i), dist_y / len(track_i), dist_z / len(track_i), dist_range, dist_tot, tilt_angle, cap_rel, cap_abs, vit_x,", "angle of the track cap_rel = np.mean(deltas[7]) # mean relative heading of the", "name_transect + \"_Nv.csv\" if os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv) else:", "tracks with: - track, target: relative and absolute index for each tracks -", "mean TScomp for each frequency \"\"\" if path_pickle[-7:] != \".pickle\": # Check the", "in m following different axis - tilt_angle, cap_rel, cap_abs: tilt or heading angle", "math.cos(lat) / 360) return dist def unit_vector(vector): \"\"\" Returns the unit vector of", "for the sounder and transducer according to given transducer for j in range(len(result[10][i])):", "j] > 0: # check if x is coherent (beam is oriented on", "if orient == 'H': #Horizontal echo sounder if track_i.x.iloc[ j] > 0: #", "[], [], [], [], [], [], [], []] for j in range(1, len(track_i)):", "a panda data frame columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs',", "= i indexTransducer = j logger.info(\"creating tables...\") # Extract the pickle data in", "deltas[2][j - 1])) - 90) # tilt angle of the track deltas[6].append(tilt_angle) 
deltas[7].append(999)", "= targets.groupby(by=\"track\").target.agg('count') # get number of target per tracks tracks_len = pd.DataFrame( {'track':", "# delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1])) # distance between the 2", "else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1]))) cap_abs = math.degrees( math.atan2(deltas[5][j", "'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']] track_i = track_i.sort_values('timeTarget') # Sort by time", "* nb_target) ) # get the position of each targets (relative and absolute)", "vit_z = dist_z / delta_t vit_range = dist_range / delta_t sd_x = np.std(deltas[4])", "row in hac_info.iterrows(): # add the hac_info columns (same for each run) if", "0: # check if any targets nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets =", "list(targets_data.groupby('track').groups) scores = [] for i in tracks_id: # for each track track_i", "= dlat * 60 * 1852 return dist def calc_distance_long(lat, lon1, lon2): \"\"\"Returns", "\"\"\" if path_pickle[-7:] != \".pickle\": # Check the pickle file logger.error(\"Not a pickle", "(geographical) heading if cap_abs < 0: cap_abs = 360 + cap_abs # correct", "absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0,", "lon1, lon2): \"\"\"Returns a distance between 2 longitudes for a given latitude\"\"\" dlon", "of the track deltas[6].append(tilt_angle) deltas[7].append(999) # relative and absolute heading is irrelevant on", "pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge with the main data frame logger.info(\"Done !\")", "= pd.DataFrame( {'track': tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index)) ) targets = pd.merge(targets, tracks_len, how='inner',", "= 
path_output + '/' + name_transect + \"_Nv.csv\" if os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv)", "deviation of previous displacement and angle - sd_tot: sum of standard deviation -", "# get the TScomp_mean: mean TScomp for all frequencies TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'})", "TScomp for all frequencies TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']), key=lambda x:", "+= [i for j in range(nb_freq)] targets = pd.DataFrame( # individual target data", "between 2 longitudes for a given latitude\"\"\" dlon = lon2 - lon1 dist", "the 2 longitudes if orient == 'H': #Horizontal echo sounder if track_i.x.iloc[ j]", "len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps']) TS_means = freq.groupby(by=\"target\").mean() # get the TScomp_mean: mean TScomp", "[], []] for j in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) #", "name_transect: for header in hac_info.columns[1:]: tracks_data[header] = row[header] tracks_data['b20'] = tracks_data['TScomp'] - (", "- transducer; name of the used transducer - freq_TS: reference frequence for TS", "dist_tot = dist_x + dist_y + dist_z tilt_angle = np.mean(deltas[6]) # mean tilt", "math logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\" input: tracking data matrix ouput:", "file from pymovies tracking and returns several key parameters for each track. 
input:", "tracks_data = targets_data.groupby('track').mean() # group targets by tracks, keep each parameters as mean", "filter for panda\"\"\" return functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1, lat2): \"\"\"Returns a distance between", "freq_data.index filename_3 = path_output + \"/\" + name_transect + \"_freq.csv\" freq_data.to_csv(filename_3, index=False) else:", "and absolute index for each tracks - TSrange: mean distance in m to", "in m to transducer - TSalong, TSarthwart: mean angle in the transducer beam", "get index for the sounder and transducer according to given transducer for j", "== 'H': #Horizontal echo sounder if track_i.x.iloc[ j] > 0: # check if", "distances, indices = nbrs.kneighbors(array) return distances[:, 1] def conjunction(*conditions): \"\"\"Multiple conditions filter for", "extraction - TS_parameters: parameter for the TS detection and tracks selection - hac_info:", "'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps']) TS_means = freq.groupby(by=\"target\").mean() #", "direction # accordingly cap_rel = abs(math.degrees( math.atan2(deltas[1][j - 1], - deltas[0][j - 1])))", "def calc_distance_long(lat, lon1, lon2): \"\"\"Returns a distance between 2 longitudes for a given", "# distance between the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1])) # distance", "for each track track_i = targets_data.loc[ targets_data['track'] == i, ['timeTarget', 'x', 'y', 'z',", "(according to orientation) - vit_x, vit_y, vit_z, vit_range: speed following different axis -", "dist_x, dist_y, dist_z, dist_range, dist_tot: mean displacement in m following different axis -", "'sd_ta', 'sd_cr', 'sd_ca']) tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge with the", "2 longitudes if orient == 'H': #Horizontal echo 
sounder if track_i.x.iloc[ j] >", "tracks.x_gps * 60 * 1852 # Get the equivalent of the latitude in", "np.sum(deltas[5]) dist_z = np.sum(deltas[2]) dist_range = np.sum(deltas[3]) dist_tot = dist_x + dist_y +", "!\") freq_data = freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index filename_3 = path_output + \"/\" +", "- x, y, z, x_gps, y_gps, z_gps: relative and absolute position - TScomp_mean,", "calc_distance_long(lat, lon1, lon2): \"\"\"Returns a distance between 2 longitudes for a given latitude\"\"\"", "# get position of each tracks tracks['long_m'] = tracks.y_gps * ( 40075000 *", "= j logger.info(\"creating tables...\") # Extract the pickle data in several panda tables.", "'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca']) tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track') #", "meters \"\"\" tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get position of each", "detection and tracks selection - hac_info: complementary info on the different runs, same", "result[10][i][j] == transducer: indexSounder = i indexTransducer = j logger.info(\"creating tables...\") # Extract", "points # - tracks, with the run track data filename_1 = path_output +", "dist_x / delta_t # speed vit_y = dist_y / delta_t vit_z = dist_z", "several key parameters for each track. 
input: - path_pickle: path to a pickle", "= np.std(deltas[7]) sd_ca = np.std(deltas[8]) sd_tot = sd_x + sd_y + sd_z scores.append(", "time deltas = [[], [], [], [], [], [], [], [], []] for", "data { \"target\": index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq *", "np.linalg.norm(vector) def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info, orient): \"\"\" Process the pickle", "'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range', 'sd_x', 'sd_y', 'sd_z', 'sd_range',", "track, target: relative and absolute index for each tracks - TSrange: mean distance", "= pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge with the main data frame logger.info(\"Done", "60 * 1852 return dist def calc_distance_long(lat, lon1, lon2): \"\"\"Returns a distance between", "tracks data...') tracks_data = targets_data.groupby('track').mean() # group targets by tracks, keep each parameters", "def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info, orient): \"\"\" Process the pickle file", "# get the b20 from TScomp and taille moyenne # get the Nv", "= np.std(deltas[3]) sd_ta = np.std(deltas[6]) sd_cr = np.std(deltas[7]) sd_ca = np.std(deltas[8]) sd_tot =", "track deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs) else: #vertical echo sounder tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j -", "- TSrange: mean distance in m to transducer - TSalong, TSarthwart: mean angle", "y axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) # delta in z axis deltas[3].append(track_i.TSrange.iloc[j]", "conditions) def calc_distance_lat(lat1, lat2): \"\"\"Returns a distance between 2 latitudes\"\"\" dlat = lat2", "np.std(deltas[2]) 
sd_range = np.std(deltas[3]) sd_ta = np.std(deltas[6]) sd_cr = np.std(deltas[7]) sd_ca = np.std(deltas[8])", "info on the different runs, same for all tracks of each run -", "mean displacement in m following different axis - tilt_angle, cap_rel, cap_abs: tilt or", "frequency TS_freq.index = range(len(TS_freq)) logger.info(\"done !\") targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']],", "20 * np.log10(tracks_data['tailleMoyenne'])) # get the b20 from TScomp and taille moyenne #", "heading of the track cap_abs = np.mean(deltas[8]) # mean absolute heading of the", "used transducer - freq_TS: reference frequence for TS extraction - TS_parameters: parameter for", "deltas = [[], [], [], [], [], [], [], [], []] for j", "- hac_info: complementary info on the different runs, same for all tracks of", "- tilt_angle, cap_rel, cap_abs: tilt or heading angle (absolute and relative) in degrees", "+ deltas[1][j - 1] ** 2), deltas[2][j - 1])) - 90) # tilt", "+ sd_y + sd_z scores.append( [i, dist_x / len(track_i), dist_y / len(track_i), dist_z", "freq: mean TScomp for each frequency \"\"\" if path_pickle[-7:] != \".pickle\": # Check", "index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files saved !\") freq_data = freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index filename_3", "[[], [], [], [], [], [], [], [], []] for j in range(1,", "TScomp for each frequency \"\"\" if path_pickle[-7:] != \".pickle\": # Check the pickle", "pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) # merge of all the data tracks", "tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge with the main data frame", "= dist_x + dist_y + dist_z tilt_angle = np.mean(deltas[6]) # mean tilt angle", "math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2), deltas[2][j -", "= freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index filename_3 = 
path_output + \"/\" + name_transect +", "between the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1])) # distance between the", "delta in x axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) # delta in y", "'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range', 'sd_x', 'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr',", "frequency freq_TS TS_freq = freq[freq.TSfreq == freq_TS] # get the TScomp for the", "tracks, keep each parameters as mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime tracks_data['k_dist']", "\"\"\" return vector / np.linalg.norm(vector) def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info, orient):", "tables. nb_target = len(result[0][indexSounder][indexTransducer]) # number of targets for the given sounder and", "freq = pd.DataFrame( # TS and frequency data { \"target\": index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]),", "cap_abs # correct to have 0-360° headings tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1]", "tracks movement analysis tracks_id = list(targets_data.groupby('track').groups) scores = [] for i in tracks_id:", "or 'V') outputs: multiple csv - tracks: matrix of tracks with: - track,", "# check if x is coherent (beam is oriented on starboard), corrects direction", "range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) ) freq", "= np.sum(deltas[5]) dist_z = np.sum(deltas[2]) dist_range = np.sum(deltas[3]) dist_tot = dist_x + dist_y", "preparing the array for nearest neighbors algorithm array = np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2,", "as csv: # - 
targets, with individual targets of each points # -", "np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) ) freq = pd.DataFrame( # TS and frequency", "\"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq * nb_target) ) # get the position of each", "for each frequency \"\"\" if path_pickle[-7:] != \".pickle\": # Check the pickle file", "angle in the transducer beam - x, y, z, x_gps, y_gps, z_gps: relative", "# get index for the sounder and transducer according to given transducer for", "np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target)", "scores.append( [i, dist_x / len(track_i), dist_y / len(track_i), dist_z / len(track_i), dist_range, dist_tot,", "\".pickle\": # Check the pickle file logger.error(\"Not a pickle file !\") return name_transect", "- 1], - deltas[0][j - 1]))) # heading relative to the boat else:", "targets.groupby(by=\"track\").target.agg('count') # get number of target per tracks tracks_len = pd.DataFrame( {'track': tracks.index,", "- 1]) # delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1])) # distance between", "\"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) ) freq = pd.DataFrame( # TS and frequency data", "logger.info(\"done !\") for i in range(len(result[10])): # get index for the sounder and", "-999 # No Nv data provided # tracks movement analysis tracks_id = list(targets_data.groupby('track').groups)", "- path_output: path to store output csv - transducer; name of the used", "# get number of target per tracks 
tracks_len = pd.DataFrame( {'track': tracks.index, 'nb_target':", "for panda\"\"\" return functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1, lat2): \"\"\"Returns a distance between 2", "i indexTransducer = j logger.info(\"creating tables...\") # Extract the pickle data in several", "= pd.read_pickle(path_pickle) # read the pickle file else: logger.error(\"File empty !\") # Si", "= np.std(deltas[6]) sd_cr = np.std(deltas[7]) sd_ca = np.std(deltas[8]) sd_tot = sd_x + sd_y", "delta_t vit_z = dist_z / delta_t vit_range = dist_range / delta_t sd_x =", "- 1])) # distance between the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1]))", "targets_data.to_csv(filename_2, index=False) logger.info(\"files saved !\") freq_data = freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index filename_3 =", "# read the pickle file else: logger.error(\"File empty !\") # Si le fichier", "# tracks movement analysis tracks_id = list(targets_data.groupby('track').groups) scores = [] for i in", "transducer - TSalong, TSarthwart: mean angle in the transducer beam - x, y,", "= pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps',", "return dist def calc_distance_long(lat, lon1, lon2): \"\"\"Returns a distance between 2 longitudes for", "index=range(len(tracks.index)) ) targets = pd.merge(targets, tracks_len, how='inner', on='track') # add the track length", "def unit_vector(vector): \"\"\" Returns the unit vector of the vector. 
\"\"\" return vector", "tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files saved !\") freq_data = freq.groupby('TSfreq').mean() freq_data['freq'] = freq_data.index", "check if any targets nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets = [] for", "360 + cap_abs # correct to have 0-360° headings tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j", "the given reference frequency TS_freq.index = range(len(TS_freq)) logger.info(\"done !\") targets = pd.concat([targets, position,", "- Nv: Nv value - dist_x, dist_y, dist_z, dist_range, dist_tot: mean displacement in", "orientation) - vit_x, vit_y, vit_z, vit_range: speed following different axis - sd_x, sd_y,", "in meters array = np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the array for", "delta_t = delta_t.total_seconds() # time length of the track (s) dist_x = np.sum(deltas[4])", "cap_rel = np.mean(deltas[7]) # mean relative heading of the track cap_abs = np.mean(deltas[8])", "accordingly cap_rel = abs(math.degrees( math.atan2(deltas[1][j - 1], - deltas[0][j - 1]))) # heading", "csv - transducer; name of the used transducer - freq_TS: reference frequence for", "if result[10][i][j] == transducer: indexSounder = i indexTransducer = j logger.info(\"creating tables...\") #", "delta in z axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) # delta in range", "tracks.long_m, tracks.z_gps]) # preparing the array for nearest neighbors algorithm array = np.transpose(array)", "irrelevant on vertical echo sounder deltas[8].append(999) delta_t = track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0]", "TSalong, TSarthwart: mean angle in the transducer beam - x, y, z, x_gps,", "Select by track length targets_data = targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) #", "neighbour - State, Abrv, 
tailleMoyenne: variables from the hac info file - b20:", "hac_info: complementary info on the different runs, same for all tracks of each", "in hac_info.iterrows(): # add the hac_info columns (same for each run) if row.Name", ":') logger.debug(str(tracks_data.describe())) # Storing 2 different data frames as csv: # - targets,", "= pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv'] = -999 # No Nv", "Get the equivalent of the latitude in meters array = np.vstack( [tracks.lat_m, tracks.long_m,", "individual target data { \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\":", "targets: matrix of all targets - freq: mean TScomp for each frequency \"\"\"", "- TScomp_mean, TScomp: mean TS of all frequencies or for the closest frequency", "since 1970 and in string formats - k_dist: distance in m to the", ") freq = pd.DataFrame( # TS and frequency data { \"target\": index_targets, \"TScomp\":", "heading if cap_abs < 0: cap_abs = 360 + cap_abs # correct to", "j in range(nb_freq)] targets = pd.DataFrame( # individual target data { \"track\": np.array(result[8][indexSounder][indexTransducer]),", "np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) ) freq = pd.DataFrame( # TS", "oriented on starboard), corrects direction # accordingly cap_rel = abs(math.degrees( math.atan2(deltas[1][j - 1],", "input: tracking data matrix ouput: column of distances to nearest neighbors in meters", "- 1])) - 90) # tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs)", "tables...\") # Extract the pickle data in several panda tables. 
nb_target = len(result[0][indexSounder][indexTransducer])", "on='track') # add the track length to the target data targets_selected = targets.loc[targets['nb_target']", "1] ** 2 + deltas[1][j - 1] ** 2), deltas[2][j - 1])) -", "[i, dist_x / len(track_i), dist_y / len(track_i), dist_z / len(track_i), dist_range, dist_tot, tilt_angle,", "TS_means = freq.groupby(by=\"target\").mean() # get the TScomp_mean: mean TScomp for all frequencies TS_means", "tailleMoyenne: variables from the hac info file - b20: b20 value - Nv:", "filename_2 = path_output + \"/\" + name_transect + \"_targets.csv\" tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False)", "row[header] tracks_data['b20'] = tracks_data['TScomp'] - ( 20 * np.log10(tracks_data['tailleMoyenne'])) # get the b20", "sd_z, sd_range, sd_ta: standard deviation of previous displacement and angle - sd_tot: sum", "dist_z, dist_range, dist_tot: mean displacement in m following different axis - tilt_angle, cap_rel,", "'y', 'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps']) TS_means = freq.groupby(by=\"target\").mean()", "range(len(result[10])): # get index for the sounder and transducer according to given transducer", "with the run track data filename_1 = path_output + \"/\" + name_transect +", "deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) # delta in y axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j", "in string formats - k_dist: distance in m to the nearest neighbour -", "columns=['x', 'y', 'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps']) TS_means =", "dist_scores, how='inner', on='track') # merge with the main data frame logger.info(\"Done !\") logger.debug('Tracks", "result = pd.read_pickle(path_pickle) # read the pickle 
file else: logger.error(\"File empty !\") #", "index for the sounder and transducer according to given transducer for j in", "(math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2), deltas[2][j", "track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds() # time length of the track (s) dist_x =", "panda\"\"\" return functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1, lat2): \"\"\"Returns a distance between 2 latitudes\"\"\"", "and returns several key parameters for each track. input: - path_pickle: path to", "targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) # convert time to int (ns, 1970) logger.info(\"targets", "the track deltas[6].append(tilt_angle) deltas[7].append(999) # relative and absolute heading is irrelevant on vertical", "= -999 # No Nv data provided # tracks movement analysis tracks_id =", "z, x_gps, y_gps, z_gps: relative and absolute position - TScomp_mean, TScomp: mean TS", "for i in tracks_id: # for each track track_i = targets_data.loc[ targets_data['track'] ==", "storing values as a panda data frame columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot',", "get the b20 from TScomp and taille moyenne # get the Nv value", "on vertical echo sounder deltas[8].append(999) delta_t = track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0] delta_t", "= np.std(deltas[4]) # standard deviation sd_y = np.std(deltas[5]) sd_z = np.std(deltas[2]) sd_range =", "targets of each points # - tracks, with the run track data filename_1", "empty !\") # Si le fichier Pickle est vide logger.info(\"done !\") for i", "corrects direction # accordingly cap_rel = abs(math.degrees( math.atan2(deltas[1][j - 1], - deltas[0][j -", "and absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer],", 
"** 2), deltas[2][j - 1])) - 90) # tilt angle of the track", "'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range', 'sd_x', 'sd_y', 'sd_z', 'sd_range', 'sd_tot',", "abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1]))) cap_abs = math.degrees( math.atan2(deltas[5][j - 1], deltas[4][j", "distance between 2 longitudes for a given latitude\"\"\" dlon = lon2 - lon1", "track cap_abs = np.mean(deltas[8]) # mean absolute heading of the track vit_x =", "per tracks - timeInt and Time: mean time in ns since 1970 and", "parameters for each track. input: - path_pickle: path to a pickle file, output", "run) if row.Name == name_transect: for header in hac_info.columns[1:]: tracks_data[header] = row[header] tracks_data['b20']", "m to the nearest neighbour - State, Abrv, tailleMoyenne: variables from the hac", "given transducer for j in range(len(result[10][i])): if result[10][i][j] == transducer: indexSounder = i", "/ delta_t vit_z = dist_z / delta_t vit_range = dist_range / delta_t sd_x", "sounder if track_i.x.iloc[ j] > 0: # check if x is coherent (beam", "and in string formats - k_dist: distance in m to the nearest neighbour", "with: - track, target: relative and absolute index for each tracks - TSrange:", "vit_y, vit_z, vit_range: speed following different axis - sd_x, sd_y, sd_z, sd_range, sd_ta:", "deltas[1][j - 1] ** 2), deltas[2][j - 1])) - 90) # tilt angle", "range(nb_target): index_targets += [i for j in range(nb_freq)] targets = pd.DataFrame( # individual", "ns since 1970 and in string formats - k_dist: distance in m to", "(relative and absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z']) positionGPS =", "Tracks grouping and analysis logger.info('Gathering tracks data...') tracks_data = targets_data.groupby('track').mean() # group targets", "k_dist: distance in m to the nearest neighbour - State, Abrv, 
tailleMoyenne: variables", "nearest neighbors in meters \"\"\" tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get", "= np.std(deltas[5]) sd_z = np.std(deltas[2]) sd_range = np.std(deltas[3]) sd_ta = np.std(deltas[6]) sd_cr =", "dist_range / delta_t sd_x = np.std(deltas[4]) # standard deviation sd_y = np.std(deltas[5]) sd_z", "targets nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets = [] for i in range(nb_target):", "the equivalent of the latitude in meters array = np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps])", "tracks['lat_m'] = tracks.x_gps * 60 * 1852 # Get the equivalent of the", "[], [], [], [], [], [], []] for j in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j]", "- 90) # tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(999) # relative and", "vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot, sd_ta, sd_cr, sd_ca] ) dist_scores = pd.DataFrame(scores,", "'V') outputs: multiple csv - tracks: matrix of tracks with: - track, target:", "targets_selected.sort_values('track') targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) # convert time to int (ns, 1970)", "+ name_transect + \"_targets.csv\" tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files saved !\") freq_data =", "nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets = [] for i in range(nb_target): index_targets", "Si le fichier Pickle est vide logger.info(\"done !\") for i in range(len(result[10])): #", "for each track path_Nv = path_output + '/' + name_transect + \"_Nv.csv\" if", "of the latitude in meters array = np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing", "data { \"track\": np.array(result[8][indexSounder][indexTransducer]), \"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": 
np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\":", "hac_info, orient): \"\"\" Process the pickle file from pymovies tracking and returns several", "latitude in meters array = np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the array", "hac info file - b20: b20 value - Nv: Nv value - dist_x,", "= pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) # merge of all the data", "== freq_TS] # get the TScomp for the given reference frequency TS_freq.index =", "hac_info.iterrows(): # add the hac_info columns (same for each run) if row.Name ==", "vector of the vector. \"\"\" return vector / np.linalg.norm(vector) def pickle_processing(path_pickle, path_output, transducer,", "path_pickle: path to a pickle file, output of movies TS analysis - path_output:", "get number of target per tracks tracks_len = pd.DataFrame( {'track': tracks.index, 'nb_target': tracks.values},", "is the length of the track on several dimensions dist_y = np.sum(deltas[5]) dist_z", "index=range(nb_target) ) freq = pd.DataFrame( # TS and frequency data { \"target\": index_targets,", "numpy as np import functools import os import math logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG)", "the track on several dimensions dist_y = np.sum(deltas[5]) dist_z = np.sum(deltas[2]) dist_range =", "len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) # delta in x axis deltas[1].append(track_i.y.iloc[j] -", "pickle data in several panda tables. 
nb_target = len(result[0][indexSounder][indexTransducer]) # number of targets", "sd_tot = sd_x + sd_y + sd_z scores.append( [i, dist_x / len(track_i), dist_y", "data provided # tracks movement analysis tracks_id = list(targets_data.groupby('track').groups) scores = [] for", "frequencies TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS))", "boat else: cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1]))) cap_abs = math.degrees(", "- orient: orientation ('H' or 'V') outputs: multiple csv - tracks: matrix of", "to int (ns, 1970) logger.info(\"targets ready !\") ##### Tracks grouping and analysis logger.info('Gathering", "'nb_target': tracks.values}, index=range(len(tracks.index)) ) targets = pd.merge(targets, tracks_len, how='inner', on='track') # add the", "according to given transducer for j in range(len(result[10][i])): if result[10][i][j] == transducer: indexSounder", "of the longitude in meters tracks['lat_m'] = tracks.x_gps * 60 * 1852 #", "logger.error(\"Not a pickle file !\") return name_transect = os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if os.path.getsize(path_pickle) >", "axis - tilt_angle, cap_rel, cap_abs: tilt or heading angle (absolute and relative) in", "np.std(deltas[4]) # standard deviation sd_y = np.std(deltas[5]) sd_z = np.std(deltas[2]) sd_range = np.std(deltas[3])", "x.value) # convert time to int (ns, 1970) logger.info(\"targets ready !\") ##### Tracks", "- targets: matrix of all targets - freq: mean TScomp for each frequency", "for j in range(nb_freq)] targets = pd.DataFrame( # individual target data { \"track\":", "\"\"\"Multiple conditions filter for panda\"\"\" return functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1, lat2): \"\"\"Returns a", "State, Abrv, tailleMoyenne: variables from the hac info file - b20: b20 value", "- State, Abrv, tailleMoyenne: variables from the hac info file - b20: b20", 
"and transducer according to given transducer for j in range(len(result[10][i])): if result[10][i][j] ==", "(beam is oriented on starboard), corrects direction # accordingly cap_rel = abs(math.degrees( math.atan2(deltas[1][j", "if os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv'] = -999", "for each track. input: - path_pickle: path to a pickle file, output of", "to transducer - TSalong, TSarthwart: mean angle in the transducer beam - x,", "##### Tracks grouping and analysis logger.info('Gathering tracks data...') tracks_data = targets_data.groupby('track').mean() # group", "transducer: indexSounder = i indexTransducer = j logger.info(\"creating tables...\") # Extract the pickle", "logger.info(\"reading...\") if os.path.getsize(path_pickle) > 0: result = pd.read_pickle(path_pickle) # read the pickle file", "dist_z / delta_t vit_range = dist_range / delta_t sd_x = np.std(deltas[4]) # standard", "tracks_id = list(targets_data.groupby('track').groups) scores = [] for i in tracks_id: # for each", "\"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) ) freq =", "/ delta_t vit_range = dist_range / delta_t sd_x = np.std(deltas[4]) # standard deviation", "several panda tables. 
nb_target = len(result[0][indexSounder][indexTransducer]) # number of targets for the given", "- 1] ** 2 + deltas[1][j - 1] ** 2), deltas[2][j - 1]))", "if row.Name == name_transect: for header in hac_info.columns[1:]: tracks_data[header] = row[header] tracks_data['b20'] =", "functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1, lat2): \"\"\"Returns a distance between 2 latitudes\"\"\" dlat =", "targets_data['track'] == i, ['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']] track_i = track_i.sort_values('timeTarget')", "transducer - freq_TS: reference frequence for TS extraction - TS_parameters: parameter for the", "position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) # merge of all the data tracks =", "all frequencies TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x -", "deviation sd_y = np.std(deltas[5]) sd_z = np.std(deltas[2]) sd_range = np.std(deltas[3]) sd_ta = np.std(deltas[6])", "(absolute and relative) in degrees (according to orientation) - vit_x, vit_y, vit_z, vit_range:", "(same for each run) if row.Name == name_transect: for header in hac_info.columns[1:]: tracks_data[header]", "np.std(deltas[6]) sd_cr = np.std(deltas[7]) sd_ca = np.std(deltas[8]) sd_tot = sd_x + sd_y +", "conditions filter for panda\"\"\" return functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1, lat2): \"\"\"Returns a distance", "for the TS detection and tracks selection - hac_info: complementary info on the", "formats - k_dist: distance in m to the nearest neighbour - State, Abrv,", "transducer; name of the used transducer - freq_TS: reference frequence for TS extraction", "sd_z scores.append( [i, dist_x / len(track_i), dist_y / len(track_i), dist_z / len(track_i), dist_range,", "[], [], []] for j in range(1, len(track_i)): deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1])", "!\") logger.debug('Tracks summary :') 
logger.debug(str(tracks_data.describe())) # Storing 2 different data frames as csv:", "path_pickle[-7:] != \".pickle\": # Check the pickle file logger.error(\"Not a pickle file !\")", "the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1])) # distance between the 2", "frames as csv: # - targets, with individual targets of each points #", "analysis logger.info('Gathering tracks data...') tracks_data = targets_data.groupby('track').mean() # group targets by tracks, keep", "Nv data provided # tracks movement analysis tracks_id = list(targets_data.groupby('track').groups) scores = []", "'y_gps']] track_i = track_i.sort_values('timeTarget') # Sort by time deltas = [[], [], [],", "the given sounder and transducer if nb_target > 0: # check if any", "from pymovies tracking and returns several key parameters for each track. input: -", "angle of the track deltas[6].append(tilt_angle) deltas[7].append(999) # relative and absolute heading is irrelevant", "dist_z = np.sum(deltas[2]) dist_range = np.sum(deltas[3]) dist_tot = dist_x + dist_y + dist_z", "def conjunction(*conditions): \"\"\"Multiple conditions filter for panda\"\"\" return functools.reduce(np.logical_and, conditions) def calc_distance_lat(lat1, lat2):", "- b20: b20 value - Nv: Nv value - dist_x, dist_y, dist_z, dist_range,", "Get the equivalent of the longitude in meters tracks['lat_m'] = tracks.x_gps * 60", "logger.info('Gathering tracks data...') tracks_data = targets_data.groupby('track').mean() # group targets by tracks, keep each", "neighbors algorithm distances, indices = nbrs.kneighbors(array) return distances[:, 1] def conjunction(*conditions): \"\"\"Multiple conditions", "cap_abs = 360 + cap_abs # correct to have 0-360° headings tilt_angle =", "y, z, x_gps, y_gps, z_gps: relative and absolute position - TScomp_mean, TScomp: mean", "* np.cos(tracks.x_gps) / 360) # Get the equivalent of the longitude in meters", "delta_t = 
track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds() # time length", "output csv - transducer; name of the used transducer - freq_TS: reference frequence", "freq_TS, TS_parameters, hac_info, orient): \"\"\" Process the pickle file from pymovies tracking and", "on='track') # merge with the main data frame logger.info(\"Done !\") logger.debug('Tracks summary :')", "# delta in z axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) # delta in", "- lat1 dist = dlat * 60 * 1852 return dist def calc_distance_long(lat,", "targets (relative and absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z']) positionGPS", "= track_i.sort_values('timeTarget') # Sort by time deltas = [[], [], [], [], [],", "all tracks of each run - orient: orientation ('H' or 'V') outputs: multiple", "# speed vit_y = dist_y / delta_t vit_z = dist_z / delta_t vit_range", "is coherent (beam is oriented on starboard), corrects direction # accordingly cap_rel =", "= [[], [], [], [], [], [], [], [], []] for j in", "\"target\": range(nb_target), \"timeTarget\": np.array(result[0][indexSounder][indexTransducer]), \"TSrange\": np.array(result[1][indexSounder][indexTransducer]), \"TSalong\": np.array(result[4][indexSounder][indexTransducer]), \"TSathwart\": np.array(result[5][indexSounder][indexTransducer]), }, index=range(nb_target) )", "= nbrs.kneighbors(array) return distances[:, 1] def conjunction(*conditions): \"\"\"Multiple conditions filter for panda\"\"\" return", "in hac_info.columns[1:]: tracks_data[header] = row[header] tracks_data['b20'] = tracks_data['TScomp'] - ( 20 * np.log10(tracks_data['tailleMoyenne']))", "= range(len(TS_freq)) logger.info(\"done !\") targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) #", "each tracks tracks['long_m'] = tracks.y_gps * ( 
40075000 * np.cos(tracks.x_gps) / 360) #", "reference # frequency freq_TS TS_freq = freq[freq.TSfreq == freq_TS] # get the TScomp", "in z axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) # delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j],", "convert time to int (ns, 1970) logger.info(\"targets ready !\") ##### Tracks grouping and", "of each points # - tracks, with the run track data filename_1 =", "algorithm='ball_tree').fit(array) # nearest neighbors algorithm distances, indices = nbrs.kneighbors(array) return distances[:, 1] def", "> 0: # check if x is coherent (beam is oriented on starboard),", "dist is the length of the track on several dimensions dist_y = np.sum(deltas[5])", "# convert time to int (ns, 1970) logger.info(\"targets ready !\") ##### Tracks grouping", "deltas[0][j - 1]))) cap_abs = math.degrees( math.atan2(deltas[5][j - 1], deltas[4][j - 1])) #", "sounder and transducer if nb_target > 0: # check if any targets nb_freq", "angle - sd_tot: sum of standard deviation - targets: matrix of all targets", "each run) if row.Name == name_transect: for header in hac_info.columns[1:]: tracks_data[header] = row[header]", "= int(len(result[9][indexSounder][indexTransducer]) / nb_target) index_targets = [] for i in range(nb_target): index_targets +=", "== transducer: indexSounder = i indexTransducer = j logger.info(\"creating tables...\") # Extract the", "pickle file logger.error(\"Not a pickle file !\") return name_transect = os.path.basename(path_pickle)[:-18] logger.info(\"reading...\") if", "pd import numpy as np import functools import os import math logger =", "as pd import numpy as np import functools import os import math logger", "and transducer if nb_target > 0: # check if any targets nb_freq =", "= pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x_gps', 'y_gps', 'z_gps']) TS_means = freq.groupby(by=\"target\").mean() # get 
the", "path_output + \"/\" + name_transect + \"_targets.csv\" tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2, index=False) logger.info(\"files saved", "the sounder and transducer according to given transducer for j in range(len(result[10][i])): if", "/ len(track_i), dist_y / len(track_i), dist_z / len(track_i), dist_range, dist_tot, tilt_angle, cap_rel, cap_abs,", "# add the track length to the target data targets_selected = targets.loc[targets['nb_target'] >=", "# get the TScomp for the given reference frequency TS_freq.index = range(len(TS_freq)) logger.info(\"done", "# mean relative heading of the track cap_abs = np.mean(deltas[8]) # mean absolute", "'vit_z', 'vit_range', 'sd_x', 'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca']) tracks_data = pd.merge(tracks_data,", "frame columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot', 'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z',", "deviation - targets: matrix of all targets - freq: mean TScomp for each", "= targets_data.groupby('track').mean() # group targets by tracks, keep each parameters as mean tracks_data['Time']", "neighbor for index, row in hac_info.iterrows(): # add the hac_info columns (same for", "tracks_data['k_dist'] = point_processing(tracks_data) # Distance to closest neighbor for index, row in hac_info.iterrows():", "add the hac_info columns (same for each run) if row.Name == name_transect: for", "deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) # delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1]))", "speed following different axis - sd_x, sd_y, sd_z, sd_range, sd_ta: standard deviation of", "distances to nearest neighbors in meters \"\"\" tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']]", "+ \"_tracks.csv\" filename_2 = path_output + \"/\" + name_transect + \"_targets.csv\" tracks_data.to_csv(filename_1, index=False)", "mean absolute heading of 
the track vit_x = dist_x / delta_t # speed", "tracks tracks_len = pd.DataFrame( {'track': tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index)) ) targets = pd.merge(targets,", "i in range(len(result[10])): # get index for the sounder and transducer according to", "1])) - 90) # tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(cap_rel) deltas[8].append(cap_abs) else:", "# absolute (geographical) heading if cap_abs < 0: cap_abs = 360 + cap_abs", "selection - hac_info: complementary info on the different runs, same for all tracks", "pickle file else: logger.error(\"File empty !\") # Si le fichier Pickle est vide", "targets by tracks, keep each parameters as mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's", "nb_target > 0: # check if any targets nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target)", "np.std(deltas[7]) sd_ca = np.std(deltas[8]) sd_tot = sd_x + sd_y + sd_z scores.append( [i,", "vit_y = dist_y / delta_t vit_z = dist_z / delta_t vit_range = dist_range", ") dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing values as a panda data frame", "tracks.y_gps * ( 40075000 * np.cos(tracks.x_gps) / 360) # Get the equivalent of", "sd_x = np.std(deltas[4]) # standard deviation sd_y = np.std(deltas[5]) sd_z = np.std(deltas[2]) sd_range", "2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j - 1])) # distance between the 2 longitudes", "length of the track on several dimensions dist_y = np.sum(deltas[5]) dist_z = np.sum(deltas[2])", "- track, target: relative and absolute index for each tracks - TSrange: mean", "unit_vector(vector): \"\"\" Returns the unit vector of the vector. 
\"\"\" return vector /", "# closest frequency from the reference # frequency freq_TS TS_freq = freq[freq.TSfreq ==", "cap_abs < 0: cap_abs = 360 + cap_abs # correct to have 0-360°", "in tracks_id: # for each track track_i = targets_data.loc[ targets_data['track'] == i, ['timeTarget',", "freq.groupby(by=\"target\").mean() # get the TScomp_mean: mean TScomp for all frequencies TS_means = TS_means.rename(columns={'TScomp':", "x is coherent (beam is oriented on starboard), corrects direction # accordingly cap_rel", "/ delta_t sd_x = np.std(deltas[4]) # standard deviation sd_y = np.std(deltas[5]) sd_z =", "and tracks selection - hac_info: complementary info on the different runs, same for", "pd.DataFrame( {'track': tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index)) ) targets = pd.merge(targets, tracks_len, how='inner', on='track')", "y_gps, z_gps: relative and absolute position - TScomp_mean, TScomp: mean TS of all", "360) return dist def unit_vector(vector): \"\"\" Returns the unit vector of the vector.", "track on several dimensions dist_y = np.sum(deltas[5]) dist_z = np.sum(deltas[2]) dist_range = np.sum(deltas[3])", "TS_parameters, hac_info, orient): \"\"\" Process the pickle file from pymovies tracking and returns", "pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv'] = -999 # No Nv data", "to the nearest neighbour - State, Abrv, tailleMoyenne: variables from the hac info", "'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range', 'sd_x', 'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta',", "sd_cr = np.std(deltas[7]) sd_ca = np.std(deltas[8]) sd_tot = sd_x + sd_y + sd_z", "return vector / np.linalg.norm(vector) def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info, orient): \"\"\"", "(ns, 1970) logger.info(\"targets ready !\") ##### Tracks grouping and analysis logger.info('Gathering tracks data...')", "'x_gps', 'y_gps']] track_i = 
track_i.sort_values('timeTarget') # Sort by time deltas = [[], [],", "track deltas[6].append(tilt_angle) deltas[7].append(999) # relative and absolute heading is irrelevant on vertical echo", "run - orient: orientation ('H' or 'V') outputs: multiple csv - tracks: matrix", "get the TScomp_mean: mean TScomp for all frequencies TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS", "sd_range, sd_ta: standard deviation of previous displacement and angle - sd_tot: sum of", "track_i.y_gps.iloc[j - 1])) # distance between the 2 longitudes if orient == 'H':", "sd_y, sd_z, sd_range, sd_tot, sd_ta, sd_cr, sd_ca] ) dist_scores = pd.DataFrame(scores, index=range(len(scores)), #", "= targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by track length targets_data = targets_selected.sort_values('track') targets_data['timeInt']", "nearest neighbors algorithm distances, indices = nbrs.kneighbors(array) return distances[:, 1] def conjunction(*conditions): \"\"\"Multiple", "np.cos(tracks.x_gps) / 360) # Get the equivalent of the longitude in meters tracks['lat_m']", "track_i.x_gps.iloc[j - 1])) # distance between the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j -", "transducer, freq_TS, TS_parameters, hac_info, orient): \"\"\" Process the pickle file from pymovies tracking", "dist_range = np.sum(deltas[3]) dist_tot = dist_x + dist_y + dist_z tilt_angle = np.mean(deltas[6])", "tracks = targets.groupby(by=\"track\").target.agg('count') # get number of target per tracks tracks_len = pd.DataFrame(", "the hac_info columns (same for each run) if row.Name == name_transect: for header", "/ len(track_i), dist_z / len(track_i), dist_range, dist_tot, tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z,", "pickle file, output of movies TS analysis - path_output: path to store output", "targets_data.groupby('track').mean() # group targets by tracks, keep each parameters as mean 
tracks_data['Time'] =", "as np import functools import os import math logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG) def", "sd_x, sd_y, sd_z, sd_range, sd_tot, sd_ta, sd_cr, sd_ca] ) dist_scores = pd.DataFrame(scores, index=range(len(scores)),", "axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) # delta in z axis deltas[3].append(track_i.TSrange.iloc[j] -", "the Nv value for each track path_Nv = path_output + '/' + name_transect", "= targets_data['timeTarget'].apply(lambda x: x.value) # convert time to int (ns, 1970) logger.info(\"targets ready", "tracks.z_gps]) # preparing the array for nearest neighbors algorithm array = np.transpose(array) nbrs", "path_output + \"/\" + name_transect + \"_tracks.csv\" filename_2 = path_output + \"/\" +", "# group targets by tracks, keep each parameters as mean tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt'])", "pandas as pd import numpy as np import functools import os import math", "}, index=range(nb_target) ) freq = pd.DataFrame( # TS and frequency data { \"target\":", "algorithm distances, indices = nbrs.kneighbors(array) return distances[:, 1] def conjunction(*conditions): \"\"\"Multiple conditions filter", "get the position of each targets (relative and absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0,", "TS_freq['TScomp']], axis=1) # merge of all the data tracks = targets.groupby(by=\"track\").target.agg('count') # get", "all targets - freq: mean TScomp for each frequency \"\"\" if path_pickle[-7:] !=", "2 latitudes\"\"\" dlat = lat2 - lat1 dist = dlat * 60 *", "axis=1) # merge of all the data tracks = targets.groupby(by=\"track\").target.agg('count') # get number", "tracks_data['TScomp'] - ( 20 * np.log10(tracks_data['tailleMoyenne'])) # get the b20 from TScomp and", "m to transducer - TSalong, TSarthwart: mean angle in the transducer beam -", "path to store output csv - transducer; name of the used transducer -", "headings tilt_angle = 
(math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1]", "'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca']) tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track')", "- tracks, with the run track data filename_1 = path_output + \"/\" +", "axis deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) # delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j -", "sd_ca] ) dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing values as a panda data", "previous displacement and angle - sd_tot: sum of standard deviation - targets: matrix", "number of target per tracks tracks_len = pd.DataFrame( {'track': tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index))", "the b20 from TScomp and taille moyenne # get the Nv value for", "on several dimensions dist_y = np.sum(deltas[5]) dist_z = np.sum(deltas[2]) dist_range = np.sum(deltas[3]) dist_tot", "the pickle data in several panda tables. 
nb_target = len(result[0][indexSounder][indexTransducer]) # number of", "reference frequency - nb_target: number of targets per tracks - timeInt and Time:", "dist_tot, tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot,", "tracking data matrix ouput: column of distances to nearest neighbors in meters \"\"\"", "deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1])) # distance between the 2 latitudes deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j], track_i.y_gps.iloc[j", "\"\"\" tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get position of each tracks", "delta_t sd_x = np.std(deltas[4]) # standard deviation sd_y = np.std(deltas[5]) sd_z = np.std(deltas[2])", "Sv import logging import pandas as pd import numpy as np import functools", "tracks['long_m'] = tracks.y_gps * ( 40075000 * np.cos(tracks.x_gps) / 360) # Get the", "the TScomp_mean: mean TScomp for all frequencies TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS =", "the pickle file logger.error(\"Not a pickle file !\") return name_transect = os.path.basename(path_pickle)[:-18] logger.info(\"reading...\")", "csv - tracks: matrix of tracks with: - track, target: relative and absolute", "standard deviation of previous displacement and angle - sd_tot: sum of standard deviation", "for all tracks of each run - orient: orientation ('H' or 'V') outputs:", "between the 2 longitudes if orient == 'H': #Horizontal echo sounder if track_i.x.iloc[", "'vit_y', 'vit_z', 'vit_range', 'sd_x', 'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca']) tracks_data =", "delta_t vit_range = dist_range / delta_t sd_x = np.std(deltas[4]) # standard deviation sd_y", "tracking and returns several key parameters for each track. 
input: - path_pickle: path", "{'track': tracks.index, 'nb_target': tracks.values}, index=range(len(tracks.index)) ) targets = pd.merge(targets, tracks_len, how='inner', on='track') #", "dlon = lon2 - lon1 dist = dlon * (40075000 * math.cos(lat) /", "track_i.x.iloc[j - 1]) # delta in x axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1])", "pd.merge(targets, tracks_len, how='inner', on='track') # add the track length to the target data", "of each targets (relative and absolute) position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y',", "'y_gps', 'z_gps']) TS_means = freq.groupby(by=\"target\").mean() # get the TScomp_mean: mean TScomp for all", "\"target\": index_targets, \"TScomp\": np.array(result[2][indexSounder][indexTransducer]), \"TSucomp\": np.array(result[3][indexSounder][indexTransducer]), \"TSfreq\": np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq * nb_target) )", "1]) # delta in range deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j], track_i.x_gps.iloc[j - 1])) # distance between the", "\"_tracks.csv\" filename_2 = path_output + \"/\" + name_transect + \"_targets.csv\" tracks_data.to_csv(filename_1, index=False) targets_data.to_csv(filename_2,", "logger.info(\"done !\") targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) # merge of", "and relative) in degrees (according to orientation) - vit_x, vit_y, vit_z, vit_range: speed", "each track track_i = targets_data.loc[ targets_data['track'] == i, ['timeTarget', 'x', 'y', 'z', 'TSrange',", "data targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by track length targets_data =", "positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']], axis=1) # merge of all the data tracks = targets.groupby(by=\"track\").target.agg('count')", "latitudes\"\"\" dlat = lat2 - lat1 dist = dlat * 
60 * 1852", "tracks of each run - orient: orientation ('H' or 'V') outputs: multiple csv", "the main data frame logger.info(\"Done !\") logger.debug('Tracks summary :') logger.debug(str(tracks_data.describe())) # Storing 2", "time in ns since 1970 and in string formats - k_dist: distance in", "= np.sum(deltas[3]) dist_tot = dist_x + dist_y + dist_z tilt_angle = np.mean(deltas[6]) #", "number of targets for the given sounder and transducer if nb_target > 0:", "hac_info.columns[1:]: tracks_data[header] = row[header] tracks_data['b20'] = tracks_data['TScomp'] - ( 20 * np.log10(tracks_data['tailleMoyenne'])) #", "index_targets += [i for j in range(nb_freq)] targets = pd.DataFrame( # individual target", "sd_tot, sd_ta, sd_cr, sd_ca] ) dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing values as", "tracks.values}, index=range(len(tracks.index)) ) targets = pd.merge(targets, tracks_len, how='inner', on='track') # add the track", "heading angle (absolute and relative) in degrees (according to orientation) - vit_x, vit_y,", "point_processing(tracks_data) # Distance to closest neighbor for index, row in hac_info.iterrows(): # add", "= np.vstack( [tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the array for nearest neighbors algorithm", "os.path.exists(path_Nv): Nv = pd.read_csv(path_Nv) tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv'] = -999 #", "mean TScomp for all frequencies TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'}) freq_TS = min(list(freq['TSfreq']), key=lambda", "frequencies or for the closest frequency from reference frequency - nb_target: number of", "> 0: result = pd.read_pickle(path_pickle) # read the pickle file else: logger.error(\"File empty", "tracks_data['b20'] = tracks_data['TScomp'] - ( 20 * np.log10(tracks_data['tailleMoyenne'])) # get the b20 from", "# delta in y axis deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) # delta in", "deltas[8].append(999) delta_t = 
track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0] delta_t = delta_t.total_seconds() # time", "x: x.value) # convert time to int (ns, 1970) logger.info(\"targets ready !\") #####", "position = pd.DataFrame(result[6][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z']) positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer], index=range(0, len(result[0][indexSounder][indexTransducer])),", "= Sv.get_nv(tracks_data, Nv) else: tracks_data['Nv'] = -999 # No Nv data provided #", "vit_range: speed following different axis - sd_x, sd_y, sd_z, sd_range, sd_ta: standard deviation", "** 2 + deltas[1][j - 1] ** 2), deltas[2][j - 1])) - 90)", "analysis tracks_id = list(targets_data.groupby('track').groups) scores = [] for i in tracks_id: # for", "nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm distances, indices = nbrs.kneighbors(array) return", "tracks tracks['long_m'] = tracks.y_gps * ( 40075000 * np.cos(tracks.x_gps) / 360) # Get", "- freq_TS)) # closest frequency from the reference # frequency freq_TS TS_freq =", "np.array(result[9][indexSounder][indexTransducer]), }, index=range(nb_freq * nb_target) ) # get the position of each targets", "import math logger = logging.getLogger('marin') logger.setLevel(logging.DEBUG) def point_processing(tracks_data): \"\"\" input: tracking data matrix", "standard deviation sd_y = np.std(deltas[5]) sd_z = np.std(deltas[2]) sd_range = np.std(deltas[3]) sd_ta =", "row.Name == name_transect: for header in hac_info.columns[1:]: tracks_data[header] = row[header] tracks_data['b20'] = tracks_data['TScomp']", "- 1])) # distance between the 2 longitudes if orient == 'H': #Horizontal", "= dist_z / delta_t vit_range = dist_range / delta_t sd_x = np.std(deltas[4]) #", "[] for i in range(nb_target): index_targets += [i for j in range(nb_freq)] targets", "# Sort by time deltas = [[], [], [], [], [], [], 
[],", "vertical echo sounder deltas[8].append(999) delta_t = track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0] delta_t =", "info file - b20: b20 value - Nv: Nv value - dist_x, dist_y,", "longitudes if orient == 'H': #Horizontal echo sounder if track_i.x.iloc[ j] > 0:", "* (40075000 * math.cos(lat) / 360) return dist def unit_vector(vector): \"\"\" Returns the", "= abs(math.degrees( math.atan2(deltas[1][j - 1], - deltas[0][j - 1]))) # heading relative to", "nb_target: number of targets per tracks - timeInt and Time: mean time in", "targets per tracks - timeInt and Time: mean time in ns since 1970", "point_processing(tracks_data): \"\"\" input: tracking data matrix ouput: column of distances to nearest neighbors", "is irrelevant on vertical echo sounder deltas[8].append(999) delta_t = track_i.timeTarget.iloc[len(track_i) - 1] -", "neighbors algorithm array = np.transpose(array) nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm", "- 1])) - 90) # tilt angle of the track deltas[6].append(tilt_angle) deltas[7].append(999) #", "x_gps, y_gps, z_gps: relative and absolute position - TScomp_mean, TScomp: mean TS of", "tilt_angle = np.mean(deltas[6]) # mean tilt angle of the track cap_rel = np.mean(deltas[7])", "'TSrange', 'x_gps', 'y_gps']] track_i = track_i.sort_values('timeTarget') # Sort by time deltas = [[],", "lat1 dist = dlat * 60 * 1852 return dist def calc_distance_long(lat, lon1,", "freq_TS TS_freq = freq[freq.TSfreq == freq_TS] # get the TScomp for the given", "- 1]) # delta in x axis deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) #", "Check the pickle file logger.error(\"Not a pickle file !\") return name_transect = os.path.basename(path_pickle)[:-18]", "index, row in hac_info.iterrows(): # add the hac_info columns (same for each run)", "to have 0-360° headings tilt_angle = (math.degrees( math.atan2(math.sqrt(deltas[0][j - 1] ** 2 +", "# add the hac_info columns (same for each 
run) if row.Name == name_transect:", "pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info, orient): \"\"\" Process the pickle file from", "relative and absolute heading is irrelevant on vertical echo sounder deltas[8].append(999) delta_t =", "- sd_tot: sum of standard deviation - targets: matrix of all targets -", "track cap_rel = np.mean(deltas[7]) # mean relative heading of the track cap_abs =" ]
[ "\"\" number = False capL = False capW = False for j in", "elif not number: if capW and ord(j) in range(97, 123): output = output", "3, 28, 28) # Export the model torch_out = torch.onnx._export(model, # model being", "image.convert('RGB') width, height = image.size num = round(width/height/0.78) w = width/num letters =", "# store the trained parameter weights inside the model file def inspect_model(): #", "image = Image.open(\"z.jpg\") # # image = image.convert('RGB') image = np.array(image) image =", "False for j in letters: if j == '#': number = True elif", "in letters: if j == '#': number = True elif ord(j) == 126:", "file-like object) export_params=True) # store the trained parameter weights inside the model file", "output = output + chr(ord(j)-48) elif ord(j) == 106: output = output +", "np import cv2 from PIL import Image import torch.utils.model_zoo as model_zoo import torch.onnx", "chr(48) else: output = output + j return output class CNN(nn.Module): def __init__(self):", "model.run(W)[0] print(model_out) # # # onnx_model(image) # # print(onnx_model) # onnx.checker.check_model(onnx_model) # #", "onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\") # # image = image.convert('RGB') image = np.array(image) image", "output = output + j elif not number: if capW and ord(j) in", "1) * w, height)) # cropped.show() cropped = np.array(cropped) cropped = cv2.resize(cropped, (28,", ":, :]) cropped = cropped.permute(0, 3, 1, 2) predicted_tensor = model(cropped) _, predicted_letter", "ONNX model onnx_model = onnx.load(\"model.onnx\") model = onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\") # #", "(28, 28)) cropped = cropped.astype(np.float32) / 255.0 cropped = torch.from_numpy(cropped[None, :, :, :])", "7 * 7, 100), nn.LeakyReLU(), nn.Linear(100, 37) ) # 1x36 def forward(self, x):", "out = self.block3(out) return out # print(make_prediction(\"test/Prairie.jpg\")) # print(make_prediction(\"test/He_was_happy..png\")) # 
print(make_prediction(\"test/the_little.png\")) # print(make_prediction(\"test/with_his_family.png\"))", "letters.append(chr(45)) elif int(predicted_letter) == 33: letters.append(chr(59)) elif int(predicted_letter) == 34: letters.append(chr(63)) elif int(predicted_letter)", "self.block1 = nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2),", "store the trained parameter weights inside the model file def inspect_model(): # Input", "= nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7 nn.LeakyReLU()", "from onnx import checker, helper import torch.optim as optim import numpy as np", "ord(j) in range(97, 106): output = output + chr(ord(j)-48) elif ord(j) == 106:", "return out # print(make_prediction(\"test/Prairie.jpg\")) # print(make_prediction(\"test/He_was_happy..png\")) # print(make_prediction(\"test/the_little.png\")) # print(make_prediction(\"test/with_his_family.png\")) # print(make_prediction(\"test/with_his_mouth..png\")) #", "+ predicted_letter)) output = \"\" number = False capL = False capW =", "elif ord(j) == 126: if capL: capW = True capL = True elif", "onnx # import onnx_caffe2.backend # from onnx import checker, helper import torch.optim as", "== 32: letters.append(chr(45)) elif int(predicted_letter) == 33: letters.append(chr(59)) elif int(predicted_letter) == 34: letters.append(chr(63))", "= False capW = False for j in letters: if j == '#':", "predicted_tensor = model(cropped) _, predicted_letter = torch.max(predicted_tensor, 1) if int(predicted_letter) == 26: letters.append(chr(32))", "linearly self.block3 = nn.Sequential( nn.Linear(32 * 7 * 7, 100), nn.LeakyReLU(), nn.Linear(100, 37)", "model (can be a file or file-like object) export_params=True) # store the trained", "the trained parameter weights inside the model file def inspect_model(): # Input image", "+ 
chr(ord(j) - 32) capL = False else: output = output + j", "cv2.resize(cropped, (28, 28)) cropped = cropped.astype(np.float32) / 255.0 cropped = torch.from_numpy(cropped[None, :, :,", "CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path) image = image.convert('RGB') width, height = image.size num", "onnx.load(\"model.onnx\") model = onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\") # # image = image.convert('RGB') image", "== 106: output = output + chr(48) else: output = output + j", "Image import torch.utils.model_zoo as model_zoo import torch.onnx def export_model(): model = CNN() model.load_state_dict(torch.load(\"model.pth\"))", "number = False capL = False capW = False for j in letters:", "= True elif j == ' ': number = False capL = False", "or file-like object) export_params=True) # store the trained parameter weights inside the model", "def export_model(): model = CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input to the model x =", "output + j return output class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.block1 =", "int(predicted_letter) == 34: letters.append(chr(63)) elif int(predicted_letter) == 35: letters.append(chr(33)) elif int(predicted_letter) == 36:", "chr(ord(j) - 32) elif capL and ord(j) in range(97, 123): output = output", "letters.append(chr(32)) elif int(predicted_letter) == 27: letters.append(chr(35)) elif int(predicted_letter) == 28: letters.append(chr(46)) elif int(predicted_letter)", "nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU() ) #", "export_model(): model = CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input to the model x = torch.randn(5,", "28)) image = image.astype(np.float32) / 255.0 image = torch.from_numpy(image[None, :, :, :]) image", "a tuple for multiple inputs) \"model.onnx-2\", # where to save the model (can", 
"letters.append(chr(59)) elif int(predicted_letter) == 34: letters.append(chr(63)) elif int(predicted_letter) == 35: letters.append(chr(33)) elif int(predicted_letter)", "output = output + j return output class CNN(nn.Module): def __init__(self): super(CNN, self).__init__()", "False output = output + j elif not number: if capW and ord(j)", "torch.onnx def export_model(): model = CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input to the model x", "for j in letters: if j == '#': number = True elif ord(j)", "model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path) image = image.convert('RGB') width, height = image.size num =", "image = Image.open(img_path) image = image.convert('RGB') width, height = image.size num = round(width/height/0.78)", "int(predicted_letter) == 33: letters.append(chr(59)) elif int(predicted_letter) == 34: letters.append(chr(63)) elif int(predicted_letter) == 35:", "to save the model (can be a file or file-like object) export_params=True) #", "ord(j) in range(97, 123): output = output + chr(ord(j) - 32) elif capL", "# # image = image.convert('RGB') image = np.array(image) image = cv2.resize(image, (28, 28))", "import cv2 from PIL import Image import torch.utils.model_zoo as model_zoo import torch.onnx def", "7, 100), nn.LeakyReLU(), nn.Linear(100, 37) ) # 1x36 def forward(self, x): out =", "the model torch_out = torch.onnx._export(model, # model being run x, # model input", "123): output = output + chr(ord(j) - 32) capL = False else: output", "= True capL = True elif j == ' ': number = False", "28: letters.append(chr(46)) elif int(predicted_letter) == 29: letters.append(chr(44)) elif int(predicted_letter) == 30: letters.append(chr(58)) elif", "28, 28) # Export the model torch_out = torch.onnx._export(model, # model being run", "nn.LeakyReLU(), nn.Linear(100, 37) ) # 1x36 def forward(self, x): out = self.block1(x) out", "model torch_out = torch.onnx._export(model, # model being run x, # model input 
(or", ") # linearly self.block3 = nn.Sequential( nn.Linear(32 * 7 * 7, 100), nn.LeakyReLU(),", "import checker, helper import torch.optim as optim import numpy as np import cv2", "nn.Linear(32 * 7 * 7, 100), nn.LeakyReLU(), nn.Linear(100, 37) ) # 1x36 def", "out # print(make_prediction(\"test/Prairie.jpg\")) # print(make_prediction(\"test/He_was_happy..png\")) # print(make_prediction(\"test/the_little.png\")) # print(make_prediction(\"test/with_his_family.png\")) # print(make_prediction(\"test/with_his_mouth..png\")) # print(make_prediction(\"test/would_run_and_get_it.png\"))", "= np.array(cropped) cropped = cv2.resize(cropped, (28, 28)) cropped = cropped.astype(np.float32) / 255.0 cropped", "= True elif ord(j) == 126: if capL: capW = True capL =", "== 27: letters.append(chr(35)) elif int(predicted_letter) == 28: letters.append(chr(46)) elif int(predicted_letter) == 29: letters.append(chr(44))", "= False capW = False output = output + j elif not number:", "= onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\") # # image = image.convert('RGB') image = np.array(image)", "self).__init__() self.block1 = nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2), # 16x28x28", "output = \"\" number = False capL = False capW = False for", "width/num letters = [] for i in range(0, num): cropped = image.crop((i *", "int(predicted_letter) == 32: letters.append(chr(45)) elif int(predicted_letter) == 33: letters.append(chr(59)) elif int(predicted_letter) == 34:", "== 31: letters.append(chr(92)) elif int(predicted_letter) == 32: letters.append(chr(45)) elif int(predicted_letter) == 33: letters.append(chr(59))", "from PIL import Image import torch.utils.model_zoo as model_zoo import torch.onnx def export_model(): model", "height)) # cropped.show() cropped = np.array(cropped) cropped = cv2.resize(cropped, (28, 28)) cropped =", "= nn.Sequential( nn.Linear(32 * 7 * 7, 100), nn.LeakyReLU(), nn.Linear(100, 37) ) #", 
"int(predicted_letter) == 36: letters.append(chr(126)) else: letters.append(chr(97 + predicted_letter)) output = \"\" number =", "ord(j) == 106: output = output + chr(48) else: output = output +", "= torch.from_numpy(image[None, :, :, :]) image = image.permute(0, 3, 1, 2) W =", "nn.LeakyReLU() ) # linearly self.block3 = nn.Sequential( nn.Linear(32 * 7 * 7, 100),", "self.block1(x) out = self.block2(out) # flatten the dataset # ipdb; ipdb.set_trace() out =", "(i + 1) * w, height)) # cropped.show() cropped = np.array(cropped) cropped =", "image.size num = round(width/height/0.78) w = width/num letters = [] for i in", "if capL: capW = True capL = True elif j == ' ':", "make_prediction(img_path): model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path) image = image.convert('RGB') width, height", "# import torch.onnx # import onnx # import onnx_caffe2.backend # from onnx import", "np.array(image) image = cv2.resize(image, (28, 28)) image = image.astype(np.float32) / 255.0 image =", "letters = [] for i in range(0, num): cropped = image.crop((i * w,", "= torch.onnx._export(model, # model being run x, # model input (or a tuple", "capL = True elif j == ' ': number = False capL =", "def inspect_model(): # Input image into the ONNX model onnx_model = onnx.load(\"model.onnx\") model", "# import onnx_caffe2.backend # from onnx import checker, helper import torch.optim as optim", "range(97, 123): output = output + chr(ord(j) - 32) elif capL and ord(j)", "False capL = False capW = False output = output + j elif", "output = output + chr(ord(j) - 32) elif capL and ord(j) in range(97,", "image into the ONNX model onnx_model = onnx.load(\"model.onnx\") model = onnx_caffe2.backend.prepare(onnx_model) image =", "126: if capL: capW = True capL = True elif j == '", "ipdb.set_trace() out = out.view(-1, 32 * 7 * 7) out = self.block3(out) return", "= CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path) 
image = image.convert('RGB') width, height = image.size", "helper import torch.optim as optim import numpy as np import cv2 from PIL", "input (or a tuple for multiple inputs) \"model.onnx-2\", # where to save the", "# where to save the model (can be a file or file-like object)", "cropped.astype(np.float32) / 255.0 cropped = torch.from_numpy(cropped[None, :, :, :]) cropped = cropped.permute(0, 3,", "torch_out = torch.onnx._export(model, # model being run x, # model input (or a", "== 28: letters.append(chr(46)) elif int(predicted_letter) == 29: letters.append(chr(44)) elif int(predicted_letter) == 30: letters.append(chr(58))", "nn.Sequential( nn.Linear(32 * 7 * 7, 100), nn.LeakyReLU(), nn.Linear(100, 37) ) # 1x36", "= output + j return output class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.block1", "capW = False output = output + j elif not number: if capW", "31: letters.append(chr(92)) elif int(predicted_letter) == 32: letters.append(chr(45)) elif int(predicted_letter) == 33: letters.append(chr(59)) elif", "import torch.optim as optim import numpy as np import cv2 from PIL import", "num): cropped = image.crop((i * w, 0, (i + 1) * w, height))", "letters.append(chr(44)) elif int(predicted_letter) == 30: letters.append(chr(58)) elif int(predicted_letter) == 31: letters.append(chr(92)) elif int(predicted_letter)", "number = False capL = False capW = False output = output +", "predicted_letter)) output = \"\" number = False capL = False capW = False", "the model x = torch.randn(5, 3, 28, 28) # Export the model torch_out", "range(0, num): cropped = image.crop((i * w, 0, (i + 1) * w,", "image = np.array(image) image = cv2.resize(image, (28, 28)) image = image.astype(np.float32) / 255.0", "cropped = cropped.permute(0, 3, 1, 2) predicted_tensor = model(cropped) _, predicted_letter = torch.max(predicted_tensor,", "= output + chr(ord(j)-48) elif ord(j) == 106: output = output + chr(48)", "output class CNN(nn.Module): def __init__(self): super(CNN, 
self).__init__() self.block1 = nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3,", "np.array(cropped) cropped = cv2.resize(cropped, (28, 28)) cropped = cropped.astype(np.float32) / 255.0 cropped =", "(28, 28)) image = image.astype(np.float32) / 255.0 image = torch.from_numpy(image[None, :, :, :])", "# # print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path) image", "image.data.numpy()} model_out = model.run(W)[0] print(model_out) # # # onnx_model(image) # # print(onnx_model) #", "# 32x7x7 nn.LeakyReLU() ) # linearly self.block3 = nn.Sequential( nn.Linear(32 * 7 *", "elif int(predicted_letter) == 30: letters.append(chr(58)) elif int(predicted_letter) == 31: letters.append(chr(92)) elif int(predicted_letter) ==", "nn.MaxPool2d(kernel_size=2), # 32x7x7 nn.LeakyReLU() ) # linearly self.block3 = nn.Sequential( nn.Linear(32 * 7", "as optim import numpy as np import cv2 from PIL import Image import", "= torch.max(predicted_tensor, 1) if int(predicted_letter) == 26: letters.append(chr(32)) elif int(predicted_letter) == 27: letters.append(chr(35))", "import torch.onnx def export_model(): model = CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input to the model", "': number = False capL = False capW = False output = output", "object) export_params=True) # store the trained parameter weights inside the model file def", "3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU() )", "j == ' ': number = False capL = False capW = False", "# # onnx_model(image) # # print(onnx_model) # onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path):", "True elif j == ' ': number = False capL = False capW", "onnx_model = onnx.load(\"model.onnx\") model = 
onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\") # # image =", "height = image.size num = round(width/height/0.78) w = width/num letters = [] for", "== 36: letters.append(chr(126)) else: letters.append(chr(97 + predicted_letter)) output = \"\" number = False", "PIL import Image import torch.utils.model_zoo as model_zoo import torch.onnx def export_model(): model =", "* w, 0, (i + 1) * w, height)) # cropped.show() cropped =", "1, 2) predicted_tensor = model(cropped) _, predicted_letter = torch.max(predicted_tensor, 1) if int(predicted_letter) ==", "= [] for i in range(0, num): cropped = image.crop((i * w, 0,", "7) out = self.block3(out) return out # print(make_prediction(\"test/Prairie.jpg\")) # print(make_prediction(\"test/He_was_happy..png\")) # print(make_prediction(\"test/the_little.png\")) #", "image.convert('RGB') image = np.array(image) image = cv2.resize(image, (28, 28)) image = image.astype(np.float32) /", "import Image import torch.utils.model_zoo as model_zoo import torch.onnx def export_model(): model = CNN()", "123): output = output + chr(ord(j) - 32) elif capL and ord(j) in", "as nn # import torch.onnx # import onnx # import onnx_caffe2.backend # from", "== 34: letters.append(chr(63)) elif int(predicted_letter) == 35: letters.append(chr(33)) elif int(predicted_letter) == 36: letters.append(chr(126))", "forward(self, x): out = self.block1(x) out = self.block2(out) # flatten the dataset #", "if int(predicted_letter) == 26: letters.append(chr(32)) elif int(predicted_letter) == 27: letters.append(chr(35)) elif int(predicted_letter) ==", "28)) cropped = cropped.astype(np.float32) / 255.0 cropped = torch.from_numpy(cropped[None, :, :, :]) cropped", "import onnx # import onnx_caffe2.backend # from onnx import checker, helper import torch.optim", "# cropped.show() cropped = np.array(cropped) cropped = cv2.resize(cropped, (28, 28)) cropped = cropped.astype(np.float32)", "elif int(predicted_letter) == 36: letters.append(chr(126)) else: 
letters.append(chr(97 + predicted_letter)) output = \"\" number", "+ j else: if ord(j) in range(97, 106): output = output + chr(ord(j)-48)", "self.block3 = nn.Sequential( nn.Linear(32 * 7 * 7, 100), nn.LeakyReLU(), nn.Linear(100, 37) )", "torch.nn as nn # import torch.onnx # import onnx # import onnx_caffe2.backend #", "cropped = np.array(cropped) cropped = cv2.resize(cropped, (28, 28)) cropped = cropped.astype(np.float32) / 255.0", "j else: if ord(j) in range(97, 106): output = output + chr(ord(j)-48) elif", "# flatten the dataset # ipdb; ipdb.set_trace() out = out.view(-1, 32 * 7", "cropped = cv2.resize(cropped, (28, 28)) cropped = cropped.astype(np.float32) / 255.0 cropped = torch.from_numpy(cropped[None,", "+ j return output class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.block1 = nn.Sequential(", "(can be a file or file-like object) export_params=True) # store the trained parameter", "= out.view(-1, 32 * 7 * 7) out = self.block3(out) return out #", "image = image.convert('RGB') image = np.array(image) image = cv2.resize(image, (28, 28)) image =", "trained parameter weights inside the model file def inspect_model(): # Input image into", "model onnx_model = onnx.load(\"model.onnx\") model = onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\") # # image", "= image.convert('RGB') image = np.array(image) image = cv2.resize(image, (28, 28)) image = image.astype(np.float32)", "= model.run(W)[0] print(model_out) # # # onnx_model(image) # # print(onnx_model) # onnx.checker.check_model(onnx_model) #", "= False for j in letters: if j == '#': number = True", "# 16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU() ) # 16x14x14 self.block2 = nn.Sequential( nn.Conv2d(in_channels=16,", "num = round(width/height/0.78) w = width/num letters = [] for i in range(0,", "= output + chr(ord(j) - 32) elif capL and ord(j) in range(97, 123):", "number: if capW and ord(j) in range(97, 123): output = output + chr(ord(j)", 
"image.astype(np.float32) / 255.0 image = torch.from_numpy(image[None, :, :, :]) image = image.permute(0, 3,", "False else: output = output + j else: if ord(j) in range(97, 106):", "= image.crop((i * w, 0, (i + 1) * w, height)) # cropped.show()", "weights inside the model file def inspect_model(): # Input image into the ONNX", "1x36 def forward(self, x): out = self.block1(x) out = self.block2(out) # flatten the", "model input (or a tuple for multiple inputs) \"model.onnx-2\", # where to save", "cropped = torch.from_numpy(cropped[None, :, :, :]) cropped = cropped.permute(0, 3, 1, 2) predicted_tensor", "+ chr(48) else: output = output + j return output class CNN(nn.Module): def", "as model_zoo import torch.onnx def export_model(): model = CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input to", "in range(97, 123): output = output + chr(ord(j) - 32) elif capL and", "/ 255.0 cropped = torch.from_numpy(cropped[None, :, :, :]) cropped = cropped.permute(0, 3, 1,", "as np import cv2 from PIL import Image import torch.utils.model_zoo as model_zoo import", "model(cropped) _, predicted_letter = torch.max(predicted_tensor, 1) if int(predicted_letter) == 26: letters.append(chr(32)) elif int(predicted_letter)", "32 * 7 * 7) out = self.block3(out) return out # print(make_prediction(\"test/Prairie.jpg\")) #", "* 7 * 7) out = self.block3(out) return out # print(make_prediction(\"test/Prairie.jpg\")) # print(make_prediction(\"test/He_was_happy..png\"))", "w, height)) # cropped.show() cropped = np.array(cropped) cropped = cv2.resize(cropped, (28, 28)) cropped", "kernel_size=5, stride=1, padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU() ) # 16x14x14 self.block2", "def make_prediction(img_path): model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path) image = image.convert('RGB') width,", "letters.append(chr(58)) elif int(predicted_letter) == 31: letters.append(chr(92)) elif int(predicted_letter) == 
32: letters.append(chr(45)) elif int(predicted_letter)", "x, # model input (or a tuple for multiple inputs) \"model.onnx-2\", # where", "model x = torch.randn(5, 3, 28, 28) # Export the model torch_out =", "= self.block1(x) out = self.block2(out) # flatten the dataset # ipdb; ipdb.set_trace() out", "optim import numpy as np import cv2 from PIL import Image import torch.utils.model_zoo", "capL = False else: output = output + j else: if ord(j) in", "int(predicted_letter) == 30: letters.append(chr(58)) elif int(predicted_letter) == 31: letters.append(chr(92)) elif int(predicted_letter) == 32:", "else: letters.append(chr(97 + predicted_letter)) output = \"\" number = False capL = False", "capL: capW = True capL = True elif j == ' ': number", "self.block3(out) return out # print(make_prediction(\"test/Prairie.jpg\")) # print(make_prediction(\"test/He_was_happy..png\")) # print(make_prediction(\"test/the_little.png\")) # print(make_prediction(\"test/with_his_family.png\")) # print(make_prediction(\"test/with_his_mouth..png\"))", "capL and ord(j) in range(97, 123): output = output + chr(ord(j) - 32)", "the model file def inspect_model(): # Input image into the ONNX model onnx_model", "elif capL and ord(j) in range(97, 123): output = output + chr(ord(j) -", "False capL = False capW = False for j in letters: if j", "in range(0, num): cropped = image.crop((i * w, 0, (i + 1) *", "1) if int(predicted_letter) == 26: letters.append(chr(32)) elif int(predicted_letter) == 27: letters.append(chr(35)) elif int(predicted_letter)", "and ord(j) in range(97, 123): output = output + chr(ord(j) - 32) capL", "the ONNX model onnx_model = onnx.load(\"model.onnx\") model = onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\") #", "# from onnx import checker, helper import torch.optim as optim import numpy as", "_, predicted_letter = torch.max(predicted_tensor, 1) if int(predicted_letter) == 26: letters.append(chr(32)) elif int(predicted_letter) ==", "the dataset # ipdb; 
ipdb.set_trace() out = out.view(-1, 32 * 7 * 7)", "= model(cropped) _, predicted_letter = torch.max(predicted_tensor, 1) if int(predicted_letter) == 26: letters.append(chr(32)) elif", "where to save the model (can be a file or file-like object) export_params=True)", "torch.max(predicted_tensor, 1) if int(predicted_letter) == 26: letters.append(chr(32)) elif int(predicted_letter) == 27: letters.append(chr(35)) elif", "2) W = {model.graph.input[0].name: image.data.numpy()} model_out = model.run(W)[0] print(model_out) # # # onnx_model(image)", "output = output + j else: if ord(j) in range(97, 106): output =", "16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU() ) # 16x14x14 self.block2 = nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32,", "self.block2(out) # flatten the dataset # ipdb; ipdb.set_trace() out = out.view(-1, 32 *", "out_channels=16, kernel_size=5, stride=1, padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU() ) # 16x14x14", "= False else: output = output + j else: if ord(j) in range(97,", "= self.block3(out) return out # print(make_prediction(\"test/Prairie.jpg\")) # print(make_prediction(\"test/He_was_happy..png\")) # print(make_prediction(\"test/the_little.png\")) # print(make_prediction(\"test/with_his_family.png\")) #", "0, (i + 1) * w, height)) # cropped.show() cropped = np.array(cropped) cropped", "model being run x, # model input (or a tuple for multiple inputs)", "29: letters.append(chr(44)) elif int(predicted_letter) == 30: letters.append(chr(58)) elif int(predicted_letter) == 31: letters.append(chr(92)) elif", "nn # import torch.onnx # import onnx # import onnx_caffe2.backend # from onnx", "255.0 cropped = torch.from_numpy(cropped[None, :, :, :]) cropped = cropped.permute(0, 3, 1, 2)", "model = CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input to the model x = torch.randn(5, 3,", ") # 1x36 def forward(self, x): out = self.block1(x) out = self.block2(out) #", "w, 0, (i + 1) * w, height)) # 
cropped.show() cropped = np.array(cropped)", "# # # onnx_model(image) # # print(onnx_model) # onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph)) def", ":]) cropped = cropped.permute(0, 3, 1, 2) predicted_tensor = model(cropped) _, predicted_letter =", "= output + j else: if ord(j) in range(97, 106): output = output", "image.crop((i * w, 0, (i + 1) * w, height)) # cropped.show() cropped", "== 35: letters.append(chr(33)) elif int(predicted_letter) == 36: letters.append(chr(126)) else: letters.append(chr(97 + predicted_letter)) output", "flatten the dataset # ipdb; ipdb.set_trace() out = out.view(-1, 32 * 7 *", "output + chr(ord(j) - 32) elif capL and ord(j) in range(97, 123): output", "letters.append(chr(126)) else: letters.append(chr(97 + predicted_letter)) output = \"\" number = False capL =", ":]) image = image.permute(0, 3, 1, 2) W = {model.graph.input[0].name: image.data.numpy()} model_out =", "# Input to the model x = torch.randn(5, 3, 28, 28) # Export", "class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.block1 = nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3, out_channels=16,", "nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU() ) # 16x14x14 self.block2 = nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5,", "elif int(predicted_letter) == 27: letters.append(chr(35)) elif int(predicted_letter) == 28: letters.append(chr(46)) elif int(predicted_letter) ==", "1, 2) W = {model.graph.input[0].name: image.data.numpy()} model_out = model.run(W)[0] print(model_out) # # #", "7 * 7) out = self.block3(out) return out # print(make_prediction(\"test/Prairie.jpg\")) # print(make_prediction(\"test/He_was_happy..png\")) #", "28) # Export the model torch_out = torch.onnx._export(model, # model being run x,", "' ': number = False capL = False capW = False output =", "= image.astype(np.float32) / 255.0 image = torch.from_numpy(image[None, :, :, :]) image = image.permute(0,", "if capW 
and ord(j) in range(97, 123): output = output + chr(ord(j) -", "and ord(j) in range(97, 123): output = output + chr(ord(j) - 32) elif", "range(97, 123): output = output + chr(ord(j) - 32) capL = False else:", "== 29: letters.append(chr(44)) elif int(predicted_letter) == 30: letters.append(chr(58)) elif int(predicted_letter) == 31: letters.append(chr(92))", "32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7 nn.LeakyReLU() ) # linearly self.block3 = nn.Sequential( nn.Linear(32 *", "33: letters.append(chr(59)) elif int(predicted_letter) == 34: letters.append(chr(63)) elif int(predicted_letter) == 35: letters.append(chr(33)) elif", "CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.block1 = nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5,", "Image.open(img_path) image = image.convert('RGB') width, height = image.size num = round(width/height/0.78) w =", "# linearly self.block3 = nn.Sequential( nn.Linear(32 * 7 * 7, 100), nn.LeakyReLU(), nn.Linear(100,", "3, 1, 2) W = {model.graph.input[0].name: image.data.numpy()} model_out = model.run(W)[0] print(model_out) # #", "# model being run x, # model input (or a tuple for multiple", "import torch import torch.nn as nn # import torch.onnx # import onnx #", "out = out.view(-1, 32 * 7 * 7) out = self.block3(out) return out", "model.load_state_dict(torch.load(\"model.pth\")) # Input to the model x = torch.randn(5, 3, 28, 28) #", "elif int(predicted_letter) == 31: letters.append(chr(92)) elif int(predicted_letter) == 32: letters.append(chr(45)) elif int(predicted_letter) ==", "# # print(onnx_model) # onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model = CNN()", "model_zoo import torch.onnx def export_model(): model = CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input to the", "model = onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\") # # image = image.convert('RGB') image 
=", "False capW = False for j in letters: if j == '#': number", "letters.append(chr(97 + predicted_letter)) output = \"\" number = False capL = False capW", "i in range(0, num): cropped = image.crop((i * w, 0, (i + 1)", "nn.Linear(100, 37) ) # 1x36 def forward(self, x): out = self.block1(x) out =", "= cv2.resize(cropped, (28, 28)) cropped = cropped.astype(np.float32) / 255.0 cropped = torch.from_numpy(cropped[None, :,", "save the model (can be a file or file-like object) export_params=True) # store", "image = image.permute(0, 3, 1, 2) W = {model.graph.input[0].name: image.data.numpy()} model_out = model.run(W)[0]", ":, :, :]) cropped = cropped.permute(0, 3, 1, 2) predicted_tensor = model(cropped) _,", "+ chr(ord(j)-48) elif ord(j) == 106: output = output + chr(48) else: output", "* 7) out = self.block3(out) return out # print(make_prediction(\"test/Prairie.jpg\")) # print(make_prediction(\"test/He_was_happy..png\")) # print(make_prediction(\"test/the_little.png\"))", "35: letters.append(chr(33)) elif int(predicted_letter) == 36: letters.append(chr(126)) else: letters.append(chr(97 + predicted_letter)) output =", "torch.optim as optim import numpy as np import cv2 from PIL import Image", "True capL = True elif j == ' ': number = False capL", "onnx_model(image) # # print(onnx_model) # onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model =", "capL = False capW = False for j in letters: if j ==", "else: output = output + j return output class CNN(nn.Module): def __init__(self): super(CNN,", "capW = True capL = True elif j == ' ': number =", "output + j elif not number: if capW and ord(j) in range(97, 123):", "output + chr(ord(j) - 32) capL = False else: output = output +", "j return output class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.block1 = nn.Sequential( #", "letters.append(chr(33)) elif int(predicted_letter) == 36: letters.append(chr(126)) else: 
letters.append(chr(97 + predicted_letter)) output = \"\"", "parameter weights inside the model file def inspect_model(): # Input image into the", "elif int(predicted_letter) == 29: letters.append(chr(44)) elif int(predicted_letter) == 30: letters.append(chr(58)) elif int(predicted_letter) ==", "# model input (or a tuple for multiple inputs) \"model.onnx-2\", # where to", "Input image into the ONNX model onnx_model = onnx.load(\"model.onnx\") model = onnx_caffe2.backend.prepare(onnx_model) image", "image = cv2.resize(image, (28, 28)) image = image.astype(np.float32) / 255.0 image = torch.from_numpy(image[None,", "chr(ord(j) - 32) capL = False else: output = output + j else:", "if j == '#': number = True elif ord(j) == 126: if capL:", "# onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image =", "nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7 nn.LeakyReLU() )", "onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path)", "output + chr(ord(j)-48) elif ord(j) == 106: output = output + chr(48) else:", "else: if ord(j) in range(97, 106): output = output + chr(ord(j)-48) elif ord(j)", "nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7 nn.LeakyReLU() ) #", "torch import torch.nn as nn # import torch.onnx # import onnx # import", "cv2 from PIL import Image import torch.utils.model_zoo as model_zoo import torch.onnx def export_model():", "= nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2), #", 
"32) elif capL and ord(j) in range(97, 123): output = output + chr(ord(j)", "import torch.nn as nn # import torch.onnx # import onnx # import onnx_caffe2.backend", "106): output = output + chr(ord(j)-48) elif ord(j) == 106: output = output", "(or a tuple for multiple inputs) \"model.onnx-2\", # where to save the model", "model file def inspect_model(): # Input image into the ONNX model onnx_model =", "if ord(j) in range(97, 106): output = output + chr(ord(j)-48) elif ord(j) ==", "# print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path) image =", "36: letters.append(chr(126)) else: letters.append(chr(97 + predicted_letter)) output = \"\" number = False capL", "34: letters.append(chr(63)) elif int(predicted_letter) == 35: letters.append(chr(33)) elif int(predicted_letter) == 36: letters.append(chr(126)) else:", "Export the model torch_out = torch.onnx._export(model, # model being run x, # model", "image = image.convert('RGB') width, height = image.size num = round(width/height/0.78) w = width/num", "255.0 image = torch.from_numpy(image[None, :, :, :]) image = image.permute(0, 3, 1, 2)", "width, height = image.size num = round(width/height/0.78) w = width/num letters = []", "ipdb; ipdb.set_trace() out = out.view(-1, 32 * 7 * 7) out = self.block3(out)", "chr(ord(j)-48) elif ord(j) == 106: output = output + chr(48) else: output =", "= image.size num = round(width/height/0.78) w = width/num letters = [] for i", "number = True elif ord(j) == 126: if capL: capW = True capL", "elif int(predicted_letter) == 35: letters.append(chr(33)) elif int(predicted_letter) == 36: letters.append(chr(126)) else: letters.append(chr(97 +", "32x7x7 nn.LeakyReLU() ) # linearly self.block3 = nn.Sequential( nn.Linear(32 * 7 * 7,", "[] for i in range(0, num): cropped = image.crop((i * w, 0, (i", "int(predicted_letter) == 28: letters.append(chr(46)) elif 
int(predicted_letter) == 29: letters.append(chr(44)) elif int(predicted_letter) == 30:", "torch.onnx._export(model, # model being run x, # model input (or a tuple for", "j elif not number: if capW and ord(j) in range(97, 123): output =", "output = output + chr(ord(j) - 32) capL = False else: output =", "# image = image.convert('RGB') image = np.array(image) image = cv2.resize(image, (28, 28)) image", "'#': number = True elif ord(j) == 126: if capL: capW = True", "== 26: letters.append(chr(32)) elif int(predicted_letter) == 27: letters.append(chr(35)) elif int(predicted_letter) == 28: letters.append(chr(46))", "def forward(self, x): out = self.block1(x) out = self.block2(out) # flatten the dataset", "37) ) # 1x36 def forward(self, x): out = self.block1(x) out = self.block2(out)", "= Image.open(\"z.jpg\") # # image = image.convert('RGB') image = np.array(image) image = cv2.resize(image,", "letters: if j == '#': number = True elif ord(j) == 126: if", "model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path) image = image.convert('RGB') width, height =", "onnx_caffe2.backend # from onnx import checker, helper import torch.optim as optim import numpy", "# import onnx # import onnx_caffe2.backend # from onnx import checker, helper import", "in range(97, 123): output = output + chr(ord(j) - 32) capL = False", "x = torch.randn(5, 3, 28, 28) # Export the model torch_out = torch.onnx._export(model,", "cropped.permute(0, 3, 1, 2) predicted_tensor = model(cropped) _, predicted_letter = torch.max(predicted_tensor, 1) if", "nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14", "= image.permute(0, 3, 1, 2) W = {model.graph.input[0].name: image.data.numpy()} model_out = model.run(W)[0] print(model_out)", "image.permute(0, 3, 1, 2) W = {model.graph.input[0].name: image.data.numpy()} model_out = model.run(W)[0] print(model_out) #", 
"file def inspect_model(): # Input image into the ONNX model onnx_model = onnx.load(\"model.onnx\")", "torch.from_numpy(cropped[None, :, :, :]) cropped = cropped.permute(0, 3, 1, 2) predicted_tensor = model(cropped)", "image = image.astype(np.float32) / 255.0 image = torch.from_numpy(image[None, :, :, :]) image =", "torch.randn(5, 3, 28, 28) # Export the model torch_out = torch.onnx._export(model, # model", "x): out = self.block1(x) out = self.block2(out) # flatten the dataset # ipdb;", "inputs) \"model.onnx-2\", # where to save the model (can be a file or", "dataset # ipdb; ipdb.set_trace() out = out.view(-1, 32 * 7 * 7) out", "inspect_model(): # Input image into the ONNX model onnx_model = onnx.load(\"model.onnx\") model =", "into the ONNX model onnx_model = onnx.load(\"model.onnx\") model = onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\")", "import torch.onnx # import onnx # import onnx_caffe2.backend # from onnx import checker,", "output + chr(48) else: output = output + j return output class CNN(nn.Module):", "cropped.show() cropped = np.array(cropped) cropped = cv2.resize(cropped, (28, 28)) cropped = cropped.astype(np.float32) /", "elif ord(j) == 106: output = output + chr(48) else: output = output", "print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image = Image.open(img_path) image = image.convert('RGB')", "kernel_size=5, stride=1, padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7 nn.LeakyReLU() ) # linearly self.block3", "letters.append(chr(35)) elif int(predicted_letter) == 28: letters.append(chr(46)) elif int(predicted_letter) == 29: letters.append(chr(44)) elif int(predicted_letter)", "a file or file-like object) export_params=True) # store the trained parameter weights inside", "cv2.resize(image, (28, 28)) image = image.astype(np.float32) / 255.0 image = torch.from_numpy(image[None, :, :,", "16x14x14 self.block2 
= nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2), #", "# 16x14x14 nn.LeakyReLU() ) # 16x14x14 self.block2 = nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1,", "numpy as np import cv2 from PIL import Image import torch.utils.model_zoo as model_zoo", "== 33: letters.append(chr(59)) elif int(predicted_letter) == 34: letters.append(chr(63)) elif int(predicted_letter) == 35: letters.append(chr(33))", "int(predicted_letter) == 35: letters.append(chr(33)) elif int(predicted_letter) == 36: letters.append(chr(126)) else: letters.append(chr(97 + predicted_letter))", "<gh_stars>1-10 import torch import torch.nn as nn # import torch.onnx # import onnx", "+ j elif not number: if capW and ord(j) in range(97, 123): output", "w = width/num letters = [] for i in range(0, num): cropped =", "tuple for multiple inputs) \"model.onnx-2\", # where to save the model (can be", "32) capL = False else: output = output + j else: if ord(j)", "= np.array(image) image = cv2.resize(image, (28, 28)) image = image.astype(np.float32) / 255.0 image", "in range(97, 106): output = output + chr(ord(j)-48) elif ord(j) == 106: output", "== '#': number = True elif ord(j) == 126: if capL: capW =", "== 126: if capL: capW = True capL = True elif j ==", "= torch.from_numpy(cropped[None, :, :, :]) cropped = cropped.permute(0, 3, 1, 2) predicted_tensor =", "capL = False capW = False output = output + j elif not", "== ' ': number = False capL = False capW = False output", "= image.convert('RGB') width, height = image.size num = round(width/height/0.78) w = width/num letters", "int(predicted_letter) == 31: letters.append(chr(92)) elif int(predicted_letter) == 32: letters.append(chr(45)) elif int(predicted_letter) == 33:", "* 7, 100), nn.LeakyReLU(), nn.Linear(100, 37) ) # 1x36 def forward(self, x): out", "elif int(predicted_letter) == 34: letters.append(chr(63)) elif int(predicted_letter) == 35: 
letters.append(chr(33)) elif int(predicted_letter) ==", "- 32) capL = False else: output = output + j else: if", "True elif ord(j) == 126: if capL: capW = True capL = True", "print(model_out) # # # onnx_model(image) # # print(onnx_model) # onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph))", "the model (can be a file or file-like object) export_params=True) # store the", "letters.append(chr(92)) elif int(predicted_letter) == 32: letters.append(chr(45)) elif int(predicted_letter) == 33: letters.append(chr(59)) elif int(predicted_letter)", "image = torch.from_numpy(image[None, :, :, :]) image = image.permute(0, 3, 1, 2) W", "stride=1, padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU() ) # 16x14x14 self.block2 =", "False capW = False output = output + j elif not number: if", "= output + chr(48) else: output = output + j return output class", "padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU() ) # 16x14x14 self.block2 = nn.Sequential(", "+ 1) * w, height)) # cropped.show() cropped = np.array(cropped) cropped = cv2.resize(cropped,", "32: letters.append(chr(45)) elif int(predicted_letter) == 33: letters.append(chr(59)) elif int(predicted_letter) == 34: letters.append(chr(63)) elif", ":, :]) image = image.permute(0, 3, 1, 2) W = {model.graph.input[0].name: image.data.numpy()} model_out", "= False capL = False capW = False output = output + j", "predicted_letter = torch.max(predicted_tensor, 1) if int(predicted_letter) == 26: letters.append(chr(32)) elif int(predicted_letter) == 27:", "elif int(predicted_letter) == 32: letters.append(chr(45)) elif int(predicted_letter) == 33: letters.append(chr(59)) elif int(predicted_letter) ==", "torch.from_numpy(image[None, :, :, :]) image = image.permute(0, 3, 1, 2) W = {model.graph.input[0].name:", "out = self.block1(x) out = self.block2(out) # flatten the dataset # ipdb; ipdb.set_trace()", "out.view(-1, 32 * 7 * 7) out = self.block3(out) 
return out # print(make_prediction(\"test/Prairie.jpg\"))", "27: letters.append(chr(35)) elif int(predicted_letter) == 28: letters.append(chr(46)) elif int(predicted_letter) == 29: letters.append(chr(44)) elif", "file or file-like object) export_params=True) # store the trained parameter weights inside the", "be a file or file-like object) export_params=True) # store the trained parameter weights", "for multiple inputs) \"model.onnx-2\", # where to save the model (can be a", "cropped = cropped.astype(np.float32) / 255.0 cropped = torch.from_numpy(cropped[None, :, :, :]) cropped =", "\"model.onnx-2\", # where to save the model (can be a file or file-like", "= onnx.load(\"model.onnx\") model = onnx_caffe2.backend.prepare(onnx_model) image = Image.open(\"z.jpg\") # # image = image.convert('RGB')", "= Image.open(img_path) image = image.convert('RGB') width, height = image.size num = round(width/height/0.78) w", "* 7 * 7, 100), nn.LeakyReLU(), nn.Linear(100, 37) ) # 1x36 def forward(self,", "106: output = output + chr(48) else: output = output + j return", "run x, # model input (or a tuple for multiple inputs) \"model.onnx-2\", #", "Input to the model x = torch.randn(5, 3, 28, 28) # Export the", "elif int(predicted_letter) == 33: letters.append(chr(59)) elif int(predicted_letter) == 34: letters.append(chr(63)) elif int(predicted_letter) ==", "= output + j elif not number: if capW and ord(j) in range(97,", "__init__(self): super(CNN, self).__init__() self.block1 = nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2),", "= cropped.permute(0, 3, 1, 2) predicted_tensor = model(cropped) _, predicted_letter = torch.max(predicted_tensor, 1)", "onnx import checker, helper import torch.optim as optim import numpy as np import", "2) predicted_tensor = model(cropped) _, predicted_letter = torch.max(predicted_tensor, 1) if int(predicted_letter) == 26:", "{model.graph.input[0].name: image.data.numpy()} model_out = model.run(W)[0] 
print(model_out) # # # onnx_model(image) # # print(onnx_model)", "# onnx_model(image) # # print(onnx_model) # onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model", "# print(onnx_model) # onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\"))", "letters.append(chr(46)) elif int(predicted_letter) == 29: letters.append(chr(44)) elif int(predicted_letter) == 30: letters.append(chr(58)) elif int(predicted_letter)", "capW and ord(j) in range(97, 123): output = output + chr(ord(j) - 32)", "= output + chr(ord(j) - 32) capL = False else: output = output", "= self.block2(out) # flatten the dataset # ipdb; ipdb.set_trace() out = out.view(-1, 32", "W = {model.graph.input[0].name: image.data.numpy()} model_out = model.run(W)[0] print(model_out) # # # onnx_model(image) #", "= round(width/height/0.78) w = width/num letters = [] for i in range(0, num):", ":, :, :]) image = image.permute(0, 3, 1, 2) W = {model.graph.input[0].name: image.data.numpy()}", "output + j else: if ord(j) in range(97, 106): output = output +", "* w, height)) # cropped.show() cropped = np.array(cropped) cropped = cv2.resize(cropped, (28, 28))", "self.block2 = nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7", "multiple inputs) \"model.onnx-2\", # where to save the model (can be a file", "cropped = image.crop((i * w, 0, (i + 1) * w, height)) #", "elif int(predicted_letter) == 28: letters.append(chr(46)) elif int(predicted_letter) == 29: letters.append(chr(44)) elif int(predicted_letter) ==", "= cropped.astype(np.float32) / 255.0 cropped = torch.from_numpy(cropped[None, :, :, :]) cropped = cropped.permute(0,", "+ chr(ord(j) - 32) elif capL and ord(j) in range(97, 123): output =", "- 32) elif capL 
and ord(j) in range(97, 123): output = output +", "= CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input to the model x = torch.randn(5, 3, 28,", "model_out = model.run(W)[0] print(model_out) # # # onnx_model(image) # # print(onnx_model) # onnx.checker.check_model(onnx_model)", "# 3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2), # 16x28x28 nn.MaxPool2d(kernel_size=2), # 16x14x14 nn.LeakyReLU()", "3, 1, 2) predicted_tensor = model(cropped) _, predicted_letter = torch.max(predicted_tensor, 1) if int(predicted_letter)", "j == '#': number = True elif ord(j) == 126: if capL: capW", "inside the model file def inspect_model(): # Input image into the ONNX model", "26: letters.append(chr(32)) elif int(predicted_letter) == 27: letters.append(chr(35)) elif int(predicted_letter) == 28: letters.append(chr(46)) elif", "super(CNN, self).__init__() self.block1 = nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=2), #", "100), nn.LeakyReLU(), nn.Linear(100, 37) ) # 1x36 def forward(self, x): out = self.block1(x)", "else: output = output + j else: if ord(j) in range(97, 106): output", "# Input image into the ONNX model onnx_model = onnx.load(\"model.onnx\") model = onnx_caffe2.backend.prepare(onnx_model)", "# Export the model torch_out = torch.onnx._export(model, # model being run x, #", "= {model.graph.input[0].name: image.data.numpy()} model_out = model.run(W)[0] print(model_out) # # # onnx_model(image) # #", "CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input to the model x = torch.randn(5, 3, 28, 28)", "ord(j) == 126: if capL: capW = True capL = True elif j", "# 16x14x14 self.block2 = nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2),", "import onnx_caffe2.backend # from onnx import checker, helper import torch.optim as optim import", "== 30: letters.append(chr(58)) elif 
int(predicted_letter) == 31: letters.append(chr(92)) elif int(predicted_letter) == 32: letters.append(chr(45))", "int(predicted_letter) == 27: letters.append(chr(35)) elif int(predicted_letter) == 28: letters.append(chr(46)) elif int(predicted_letter) == 29:", "int(predicted_letter) == 29: letters.append(chr(44)) elif int(predicted_letter) == 30: letters.append(chr(58)) elif int(predicted_letter) == 31:", "int(predicted_letter) == 26: letters.append(chr(32)) elif int(predicted_letter) == 27: letters.append(chr(35)) elif int(predicted_letter) == 28:", "elif j == ' ': number = False capL = False capW =", "30: letters.append(chr(58)) elif int(predicted_letter) == 31: letters.append(chr(92)) elif int(predicted_letter) == 32: letters.append(chr(45)) elif", "out_channels=32, kernel_size=5, stride=1, padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7 nn.LeakyReLU() ) # linearly", "16x14x14 nn.LeakyReLU() ) # 16x14x14 self.block2 = nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),", "= torch.randn(5, 3, 28, 28) # Export the model torch_out = torch.onnx._export(model, #", "output = output + chr(48) else: output = output + j return output", "checker, helper import torch.optim as optim import numpy as np import cv2 from", "= False capL = False capW = False for j in letters: if", "padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7 nn.LeakyReLU() ) # linearly self.block3 = nn.Sequential(", "export_params=True) # store the trained parameter weights inside the model file def inspect_model():", "# 1x36 def forward(self, x): out = self.block1(x) out = self.block2(out) # flatten", "capW = False for j in letters: if j == '#': number =", "= cv2.resize(image, (28, 28)) image = image.astype(np.float32) / 255.0 image = torch.from_numpy(image[None, :,", "/ 255.0 image = torch.from_numpy(image[None, :, :, :]) image = image.permute(0, 3, 1,", "stride=1, padding=2), # 32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7 
nn.LeakyReLU() ) # linearly self.block3 =", "out = self.block2(out) # flatten the dataset # ipdb; ipdb.set_trace() out = out.view(-1,", "print(onnx_model) # onnx.checker.check_model(onnx_model) # # print(onnx.helper.printable_graph(onnx_model.graph)) def make_prediction(img_path): model = CNN() model.load_state_dict(torch.load(\"final_model/model.pth\")) image", "= \"\" number = False capL = False capW = False for j", "to the model x = torch.randn(5, 3, 28, 28) # Export the model", "Image.open(\"z.jpg\") # # image = image.convert('RGB') image = np.array(image) image = cv2.resize(image, (28,", "def __init__(self): super(CNN, self).__init__() self.block1 = nn.Sequential( # 3x28x28 nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1,", "import torch.utils.model_zoo as model_zoo import torch.onnx def export_model(): model = CNN() model.load_state_dict(torch.load(\"model.pth\")) #", "import numpy as np import cv2 from PIL import Image import torch.utils.model_zoo as", "letters.append(chr(63)) elif int(predicted_letter) == 35: letters.append(chr(33)) elif int(predicted_letter) == 36: letters.append(chr(126)) else: letters.append(chr(97", "= False output = output + j elif not number: if capW and", "being run x, # model input (or a tuple for multiple inputs) \"model.onnx-2\",", "not number: if capW and ord(j) in range(97, 123): output = output +", "round(width/height/0.78) w = width/num letters = [] for i in range(0, num): cropped", "for i in range(0, num): cropped = image.crop((i * w, 0, (i +", "torch.onnx # import onnx # import onnx_caffe2.backend # from onnx import checker, helper", "# ipdb; ipdb.set_trace() out = out.view(-1, 32 * 7 * 7) out =", "return output class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.block1 = nn.Sequential( # 3x28x28", "nn.LeakyReLU() ) # 16x14x14 self.block2 = nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), #", "= width/num letters = [] for i in range(0, 
num): cropped = image.crop((i", "ord(j) in range(97, 123): output = output + chr(ord(j) - 32) capL =", ") # 16x14x14 self.block2 = nn.Sequential( nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), # 32x14x14", "torch.utils.model_zoo as model_zoo import torch.onnx def export_model(): model = CNN() model.load_state_dict(torch.load(\"model.pth\")) # Input", "range(97, 106): output = output + chr(ord(j)-48) elif ord(j) == 106: output =", "# 32x14x14 nn.MaxPool2d(kernel_size=2), # 32x7x7 nn.LeakyReLU() ) # linearly self.block3 = nn.Sequential( nn.Linear(32", "j in letters: if j == '#': number = True elif ord(j) ==" ]
[ "quer converter para dólar: ')) dolar = real / 3.27 print(f'Com R${real} você", "converter para dólar: ')) dolar = real / 3.27 print(f'Com R${real} você pode", "voce quer converter para dólar: ')) dolar = real / 3.27 print(f'Com R${real}", "float(input('Insíra quantos reais voce quer converter para dólar: ')) dolar = real /", "para dólar: ')) dolar = real / 3.27 print(f'Com R${real} você pode comprar", "quantos reais voce quer converter para dólar: ')) dolar = real / 3.27", "reais voce quer converter para dólar: ')) dolar = real / 3.27 print(f'Com", "dólar: ')) dolar = real / 3.27 print(f'Com R${real} você pode comprar US${dolar}!')", "= float(input('Insíra quantos reais voce quer converter para dólar: ')) dolar = real", "real = float(input('Insíra quantos reais voce quer converter para dólar: ')) dolar =" ]
[ "['Michael', 'Bob', 'Tracy'] print(classmates) # 取第一个元素 print(classmates[0]) # 取最后一个元素 print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\")", "# 删除最后一个元素 classmates.pop() print(classmates) # 删除指定位置的元素 classmates.pop(3) print(classmates) # 替换指定位置元素 classmates[0] = \"OK\"", "print(classmates) # 取第一个元素 print(classmates[0]) # 取最后一个元素 print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置", "classmates.insert(1, \"Walker\") print(classmates) # 删除最后一个元素 classmates.pop() print(classmates) # 删除指定位置的元素 classmates.pop(3) print(classmates) # 替换指定位置元素", "print(classmates) # 删除最后一个元素 classmates.pop() print(classmates) # 删除指定位置的元素 classmates.pop(3) print(classmates) # 替换指定位置元素 classmates[0] =", "# 删除指定位置的元素 classmates.pop(3) print(classmates) # 替换指定位置元素 classmates[0] = \"OK\" print(classmates) # 获取长度 print(len(classmates))", "# 取第一个元素 print(classmates[0]) # 取最后一个元素 print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置 classmates.insert(1,", "# 取最后一个元素 print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates) #", "# 把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates) # 删除最后一个元素 classmates.pop() print(classmates) # 删除指定位置的元素 classmates.pop(3) print(classmates)", "# 初始化list classmates = ['Michael', 'Bob', 'Tracy'] print(classmates) # 取第一个元素 print(classmates[0]) # 取最后一个元素", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # 初始化list classmates = ['Michael', 'Bob',", "初始化list classmates = ['Michael', 'Bob', 'Tracy'] print(classmates) # 取第一个元素 print(classmates[0]) # 取最后一个元素 print(classmates[-1])", "python3 # -*- coding: utf-8 -*- # 初始化list classmates = ['Michael', 'Bob', 'Tracy']", "取最后一个元素 print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates) # 删除最后一个元素", "= ['Michael', 'Bob', 'Tracy'] print(classmates) # 取第一个元素 
print(classmates[0]) # 取最后一个元素 print(classmates[-1]) # 追加元素到末尾", "追加元素到末尾 classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates) # 删除最后一个元素 classmates.pop() print(classmates) #", "classmates.pop() print(classmates) # 删除指定位置的元素 classmates.pop(3) print(classmates) # 替换指定位置元素 classmates[0] = \"OK\" print(classmates) #", "print(classmates) # 删除指定位置的元素 classmates.pop(3) print(classmates) # 替换指定位置元素 classmates[0] = \"OK\" print(classmates) # 获取长度", "-*- coding: utf-8 -*- # 初始化list classmates = ['Michael', 'Bob', 'Tracy'] print(classmates) #", "print(classmates) # 把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates) # 删除最后一个元素 classmates.pop() print(classmates) # 删除指定位置的元素 classmates.pop(3)", "utf-8 -*- # 初始化list classmates = ['Michael', 'Bob', 'Tracy'] print(classmates) # 取第一个元素 print(classmates[0])", "'Tracy'] print(classmates) # 取第一个元素 print(classmates[0]) # 取最后一个元素 print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\") print(classmates) #", "-*- # 初始化list classmates = ['Michael', 'Bob', 'Tracy'] print(classmates) # 取第一个元素 print(classmates[0]) #", "<filename>data_structure/list.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- # 初始化list classmates = ['Michael',", "coding: utf-8 -*- # 初始化list classmates = ['Michael', 'Bob', 'Tracy'] print(classmates) # 取第一个元素", "删除最后一个元素 classmates.pop() print(classmates) # 删除指定位置的元素 classmates.pop(3) print(classmates) # 替换指定位置元素 classmates[0] = \"OK\" print(classmates)", "把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates) # 删除最后一个元素 classmates.pop() print(classmates) # 删除指定位置的元素 classmates.pop(3) print(classmates) #", "print(classmates[0]) # 取最后一个元素 print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates)", "# 追加元素到末尾 classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates) # 删除最后一个元素 classmates.pop() 
print(classmates)", "\"Walker\") print(classmates) # 删除最后一个元素 classmates.pop() print(classmates) # 删除指定位置的元素 classmates.pop(3) print(classmates) # 替换指定位置元素 classmates[0]", "print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates) # 删除最后一个元素 classmates.pop()", "classmates = ['Michael', 'Bob', 'Tracy'] print(classmates) # 取第一个元素 print(classmates[0]) # 取最后一个元素 print(classmates[-1]) #", "# -*- coding: utf-8 -*- # 初始化list classmates = ['Michael', 'Bob', 'Tracy'] print(classmates)", "classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置 classmates.insert(1, \"Walker\") print(classmates) # 删除最后一个元素 classmates.pop() print(classmates) # 删除指定位置的元素", "取第一个元素 print(classmates[0]) # 取最后一个元素 print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\") print(classmates) # 把元素插入到指定的位置 classmates.insert(1, \"Walker\")", "'Bob', 'Tracy'] print(classmates) # 取第一个元素 print(classmates[0]) # 取最后一个元素 print(classmates[-1]) # 追加元素到末尾 classmates.append(\"Mary\") print(classmates)" ]
[ "IntegerField( label='League ID', validators=[validators.InputRequired()] ) year = IntegerField( label='Year', default=2021, validators=[validators.InputRequired()] ) espn_s2", ") espn_s2 = StringField( label='ESPN_S2', validators=[validators.InputRequired()] ) swid = StringField( label='swid', validators=[validators.InputRequired()] )", "default=2021, validators=[validators.InputRequired()] ) espn_s2 = StringField( label='ESPN_S2', validators=[validators.InputRequired()] ) swid = StringField( label='swid',", "label='swid', validators=[validators.InputRequired()] ) class TradeForms(Form): name = SelectField(\"Placeholder\", choices=[]) class SelectFormList(Form): name_entries =", "validators=[validators.InputRequired()] ) year = IntegerField( label='Year', default=2021, validators=[validators.InputRequired()] ) espn_s2 = StringField( label='ESPN_S2',", "wtforms import Form, IntegerField, StringField, validators, SelectField, FieldList, FormField class InputForm(Form): league_id =", "SelectField, FieldList, FormField class InputForm(Form): league_id = IntegerField( label='League ID', validators=[validators.InputRequired()] ) year", "StringField( label='ESPN_S2', validators=[validators.InputRequired()] ) swid = StringField( label='swid', validators=[validators.InputRequired()] ) class TradeForms(Form): name", "FormField class InputForm(Form): league_id = IntegerField( label='League ID', validators=[validators.InputRequired()] ) year = IntegerField(", "StringField, validators, SelectField, FieldList, FormField class InputForm(Form): league_id = IntegerField( label='League ID', validators=[validators.InputRequired()]", "ID', validators=[validators.InputRequired()] ) year = IntegerField( label='Year', default=2021, validators=[validators.InputRequired()] ) espn_s2 = StringField(", "IntegerField, StringField, validators, SelectField, FieldList, FormField class InputForm(Form): league_id = IntegerField( label='League ID',", "swid = StringField( label='swid', 
validators=[validators.InputRequired()] ) class TradeForms(Form): name = SelectField(\"Placeholder\", choices=[]) class", ") year = IntegerField( label='Year', default=2021, validators=[validators.InputRequired()] ) espn_s2 = StringField( label='ESPN_S2', validators=[validators.InputRequired()]", "class InputForm(Form): league_id = IntegerField( label='League ID', validators=[validators.InputRequired()] ) year = IntegerField( label='Year',", "validators=[validators.InputRequired()] ) class TradeForms(Form): name = SelectField(\"Placeholder\", choices=[]) class SelectFormList(Form): name_entries = FieldList(FormField(TradeForms))", "validators, SelectField, FieldList, FormField class InputForm(Form): league_id = IntegerField( label='League ID', validators=[validators.InputRequired()] )", "= StringField( label='ESPN_S2', validators=[validators.InputRequired()] ) swid = StringField( label='swid', validators=[validators.InputRequired()] ) class TradeForms(Form):", "IntegerField( label='Year', default=2021, validators=[validators.InputRequired()] ) espn_s2 = StringField( label='ESPN_S2', validators=[validators.InputRequired()] ) swid =", "FieldList, FormField class InputForm(Form): league_id = IntegerField( label='League ID', validators=[validators.InputRequired()] ) year =", "<reponame>pritish-devurkar/Hackathon2021_FFL from wtforms import Form, IntegerField, StringField, validators, SelectField, FieldList, FormField class InputForm(Form):", "Form, IntegerField, StringField, validators, SelectField, FieldList, FormField class InputForm(Form): league_id = IntegerField( label='League", "validators=[validators.InputRequired()] ) swid = StringField( label='swid', validators=[validators.InputRequired()] ) class TradeForms(Form): name = SelectField(\"Placeholder\",", "InputForm(Form): league_id = IntegerField( label='League ID', validators=[validators.InputRequired()] ) year = IntegerField( label='Year', default=2021,", "label='Year', default=2021, 
validators=[validators.InputRequired()] ) espn_s2 = StringField( label='ESPN_S2', validators=[validators.InputRequired()] ) swid = StringField(", "year = IntegerField( label='Year', default=2021, validators=[validators.InputRequired()] ) espn_s2 = StringField( label='ESPN_S2', validators=[validators.InputRequired()] )", "= IntegerField( label='Year', default=2021, validators=[validators.InputRequired()] ) espn_s2 = StringField( label='ESPN_S2', validators=[validators.InputRequired()] ) swid", "= StringField( label='swid', validators=[validators.InputRequired()] ) class TradeForms(Form): name = SelectField(\"Placeholder\", choices=[]) class SelectFormList(Form):", "validators=[validators.InputRequired()] ) espn_s2 = StringField( label='ESPN_S2', validators=[validators.InputRequired()] ) swid = StringField( label='swid', validators=[validators.InputRequired()]", ") swid = StringField( label='swid', validators=[validators.InputRequired()] ) class TradeForms(Form): name = SelectField(\"Placeholder\", choices=[])", "= IntegerField( label='League ID', validators=[validators.InputRequired()] ) year = IntegerField( label='Year', default=2021, validators=[validators.InputRequired()] )", "league_id = IntegerField( label='League ID', validators=[validators.InputRequired()] ) year = IntegerField( label='Year', default=2021, validators=[validators.InputRequired()]", "from wtforms import Form, IntegerField, StringField, validators, SelectField, FieldList, FormField class InputForm(Form): league_id", "StringField( label='swid', validators=[validators.InputRequired()] ) class TradeForms(Form): name = SelectField(\"Placeholder\", choices=[]) class SelectFormList(Form): name_entries", "import Form, IntegerField, StringField, validators, SelectField, FieldList, FormField class InputForm(Form): league_id = IntegerField(", "label='League ID', validators=[validators.InputRequired()] ) year = IntegerField( label='Year', default=2021, validators=[validators.InputRequired()] ) espn_s2 =", 
"espn_s2 = StringField( label='ESPN_S2', validators=[validators.InputRequired()] ) swid = StringField( label='swid', validators=[validators.InputRequired()] ) class", "label='ESPN_S2', validators=[validators.InputRequired()] ) swid = StringField( label='swid', validators=[validators.InputRequired()] ) class TradeForms(Form): name =" ]
[ "re def indent(text: str) -> str: return ' ' + re.sub(r'\\n', '\\n ',", "def indent(text: str) -> str: return ' ' + re.sub(r'\\n', '\\n ', text)", "str) -> str: return ' ' + re.sub(r'\\n', '\\n ', text) def read_file(path:", "str: return ' ' + re.sub(r'\\n', '\\n ', text) def read_file(path: str) ->", "<filename>audiorename/utils.py import re def indent(text: str) -> str: return ' ' + re.sub(r'\\n',", "+ re.sub(r'\\n', '\\n ', text) def read_file(path: str) -> str: return open(path, 'r').read()", "-> str: return ' ' + re.sub(r'\\n', '\\n ', text) def read_file(path: str)", "indent(text: str) -> str: return ' ' + re.sub(r'\\n', '\\n ', text) def", "' + re.sub(r'\\n', '\\n ', text) def read_file(path: str) -> str: return open(path,", "' ' + re.sub(r'\\n', '\\n ', text) def read_file(path: str) -> str: return", "return ' ' + re.sub(r'\\n', '\\n ', text) def read_file(path: str) -> str:", "import re def indent(text: str) -> str: return ' ' + re.sub(r'\\n', '\\n" ]
[ "<gh_stars>0 from django.apps import AppConfig class GrscAppConfig(AppConfig): default_auto_field = 'django.db.models.BigAutoField' name = 'grsc_app'" ]
[ "**kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space = space @abstractmethod def dist(self, point_a, point_b, **kwargs): \"\"\"Distance", "---------- point : Point-like, shape=[...] Point to evaluate. atol : float Absolute tolerance.", "def __hash__(self): \"\"\"Define a hash for the point.\"\"\" @abstractmethod def to_array(self): \"\"\"Turn the", "shape=[...] Point in the PointSet. Returns ------- path : callable Time parameterized geodesic", "Notes ----- Explicitly defining args_positions and args names ensures it works for all", "param: int Parameter defining the pointset. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\'} Point", "set. Parameters ---------- point : Point-like, shape=[...] Point to evaluate. atol : float", "into an array. Parameters ---------- points : list of Point, shape=[...] Number of", "points of type Point. Parameters ---------- param: int Parameter defining the pointset. default_point_type", "\"\"\"Check point type and transform in iterable if not the case. Parameters ----------", "Point or List of Point, shape=[...] Point in the PointSet. point_b: Point or", "super(PointSetMetric, self).__init__(**kwargs) self.space = space @abstractmethod def dist(self, point_a, point_b, **kwargs): \"\"\"Distance between", "return list_a, list_b if n_a == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if n_b", "def geodesic(self, initial_point, end_point, **kwargs): \"\"\"Compute the geodesic in the PointSet. Parameters ----------", "class PointSetMetric(ABC): r\"\"\"Class for the lenght spaces. Parameters ---------- Set : PointSet Underling", "<NAME> \"\"\" import functools import itertools from abc import ABC, abstractmethod def broadcast_lists(list_a,", "tolerance. Optional, default: backend atol. Returns ------- belongs : array-like, shape=[...] Boolean evaluating", "end_point, **kwargs): \"\"\"Compute the geodesic in the PointSet. 
Parameters ---------- initial_point: Point or", "the PointSet. Parameters ---------- point_a: Point or List of Point, shape=[...] Point in", "into a numpy array. Returns ------- array_point : array-like, shape=[...] An array representation", "def _dec(func): @functools.wraps(func) def _wrapped(*args, **kwargs): args = list(args) for pos, name in", "for points of a set.\"\"\" @abstractmethod def __repr__(self): \"\"\"Produce a string with a", "_wrapped(*args, **kwargs): args = list(args) for pos, name in args_positions: if name in", "the Point type. \"\"\" class PointSet(ABC): r\"\"\"Class for a set of points of", "Lead authors: <NAME> & <NAME> \"\"\" import functools import itertools from abc import", "def set_to_array(self, points): \"\"\"Convert a set of points into an array. Parameters ----------", "**kwargs): \"\"\"Compute the geodesic in the PointSet. Parameters ---------- initial_point: Point or List", "[list, tuple]): return [arg] return arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point type and", "= list(args) for pos, name in args_positions: if name in kwargs: kwargs[name] =", "name in kwargs: kwargs[name] = manipulate_input(kwargs[name]) else: args[pos] = manipulate_input(args[pos]) return func(*args, **kwargs)", ": int Number of samples. Optional, default: 1. Returns ------- samples : List", "point.\"\"\" @abstractmethod def __hash__(self): \"\"\"Define a hash for the point.\"\"\" @abstractmethod def to_array(self):", "_vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point type and transform in iterable if not the case.", "name. A tuple for each position. Notes ----- Explicitly defining args_positions and args", "def to_array(self): \"\"\"Turn the point into a numpy array. Returns ------- array_point :", "random_point(self, n_samples=1): r\"\"\"Sample random points on the PointSet. Parameters ---------- n_samples : int", ": PointSet Underling PointSet. 
default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\' } Point type.", "Point or List of Points, shape=[...] Point in the PointSet. Returns ------- path", "} Point type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate", "in iterable if not the case. Parameters ---------- args_positions : tuple Position and", "return func(*args, **kwargs) return _wrapped return _dec class Point(ABC): r\"\"\"Class for points of", "to turn into an array. Returns ------- points_array : array-like, shape=[...] Points sampled", "**kwargs): \"\"\"Distance between two points in the PointSet. Parameters ---------- point_a: Point or", "\"\"\"Class for Stratified Spaces. Lead authors: <NAME> & <NAME> \"\"\" import functools import", "------- distance : array-like, shape=[...] Distance. \"\"\" @abstractmethod def geodesic(self, initial_point, end_point, **kwargs):", "List of Points, shape=[...] Point in the PointSet. Returns ------- path : callable", "of Points, shape=[...] Point in the PointSet. end_point: Point or List of Points,", "type Point. Parameters ---------- param: int Parameter defining the pointset. default_point_type : str,", "manipulate_input(kwargs[name]) else: args[pos] = manipulate_input(args[pos]) return func(*args, **kwargs) return _wrapped return _dec class", "Optional, default: \\'intrinsic\\'. \"\"\" def __init__(self, space: PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space =", "Point or List of Point, shape=[...] Point in the PointSet. Returns ------- distance", "1: return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if n_b == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0])", "shape=[...] An array representation of the Point type. \"\"\" class PointSet(ABC): r\"\"\"Class for", "List of Point, shape=[...] Point in the PointSet. Returns ------- distance : array-like,", "args_positions : tuple Position and corresponding argument name. 
A tuple for each position.", "defining args_positions and args names ensures it works for all combinations of input", "Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default:", "or List of Points, shape=[...] Point in the PointSet. Returns ------- path :", "array-like, shape=[...] Distance. \"\"\" @abstractmethod def geodesic(self, initial_point, end_point, **kwargs): \"\"\"Compute the geodesic", "with a verbal description of the point.\"\"\" @abstractmethod def __hash__(self): \"\"\"Define a hash", "@abstractmethod def geodesic(self, initial_point, end_point, **kwargs): \"\"\"Compute the geodesic in the PointSet. Parameters", "in the PointSet. Parameters ---------- initial_point: Point or List of Points, shape=[...] Point", "str, {\\'vector\\', \\'matrix\\', \\'Point\\'} Point type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\',", "_manipulate_input(arg): if not (type(arg) in [list, tuple]): return [arg] return arg def _vectorize_point(*args_positions,", "a set.\"\"\" @abstractmethod def __repr__(self): \"\"\"Produce a string with a verbal description of", "return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast lens {n_a} and {n_b}\") def _manipulate_input(arg):", ": str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" def __init__(self,", "belongs to the set. \"\"\" @abstractmethod def random_point(self, n_samples=1): r\"\"\"Sample random points on", "itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if n_b == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot", "len(list_a) n_b = len(list_b) if n_a == n_b: return list_a, list_b if n_a", "shape=[...] Point to evaluate. atol : float Absolute tolerance. 
Optional, default: backend atol.", "manipulate_input=_manipulate_input): \"\"\"Check point type and transform in iterable if not the case. Parameters", "== 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if n_b == 1: return itertools.zip_longest(list_a, list_b,", "array-like, shape=[...] An array representation of the Point type. \"\"\" class PointSet(ABC): r\"\"\"Class", "numpy array. Returns ------- array_point : array-like, shape=[...] An array representation of the", "Returns ------- array_point : array-like, shape=[...] An array representation of the Point type.", "------- points_array : array-like, shape=[...] Points sampled on the PointSet. \"\"\" class PointSetMetric(ABC):", "type to turn into an array. Returns ------- points_array : array-like, shape=[...] Points", "the point into a numpy array. Returns ------- array_point : array-like, shape=[...] An", "from abc import ABC, abstractmethod def broadcast_lists(list_a, list_b): \"\"\"Broadcast two lists. Similar behavior", "initial_point: Point or List of Points, shape=[...] Point in the PointSet. end_point: Point", "Optional, default: backend atol. Returns ------- belongs : array-like, shape=[...] Boolean evaluating if", "[arg] return arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point type and transform in iterable", "Returns ------- belongs : array-like, shape=[...] Boolean evaluating if point belongs to the", "\"\"\" def _dec(func): @functools.wraps(func) def _wrapped(*args, **kwargs): args = list(args) for pos, name", "------- belongs : array-like, shape=[...] Boolean evaluating if point belongs to the set.", "self.space = space @abstractmethod def dist(self, point_a, point_b, **kwargs): \"\"\"Distance between two points", "of Point, shape=[...] Point in the PointSet. 
Returns ------- distance : array-like, shape=[...]", "space: PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space = space @abstractmethod def dist(self, point_a, point_b,", "list_b): \"\"\"Broadcast two lists. Similar behavior as ``gs.broadcast_arrays``, but for lists. \"\"\" n_a", "the point.\"\"\" @abstractmethod def __hash__(self): \"\"\"Define a hash for the point.\"\"\" @abstractmethod def", "to_array(self): \"\"\"Turn the point into a numpy array. Returns ------- array_point : array-like,", "= len(list_a) n_b = len(list_b) if n_a == n_b: return list_a, list_b if", "point_b, **kwargs): \"\"\"Distance between two points in the PointSet. Parameters ---------- point_a: Point", "Parameters ---------- points : list of Point, shape=[...] Number of samples of point", "arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point type and transform in iterable if not", "tuple Position and corresponding argument name. A tuple for each position. Notes -----", "string with a verbal description of the point.\"\"\" @abstractmethod def __hash__(self): \"\"\"Define a", "\"\"\" @abstractmethod def belongs(self, point, atol): r\"\"\"Evaluate if a point belongs to the", "Explicitly defining args_positions and args names ensures it works for all combinations of", "and {n_b}\") def _manipulate_input(arg): if not (type(arg) in [list, tuple]): return [arg] return", "PointSet. \"\"\" @abstractmethod def set_to_array(self, points): \"\"\"Convert a set of points into an", "the PointSet. Parameters ---------- n_samples : int Number of samples. Optional, default: 1.", "itertools from abc import ABC, abstractmethod def broadcast_lists(list_a, list_b): \"\"\"Broadcast two lists. Similar", ": str, {\\'vector\\', \\'matrix\\', \\'Point\\' } Point type. Optional, default: \\'Point\\'. 
default_coords_type :", "list(args) for pos, name in args_positions: if name in kwargs: kwargs[name] = manipulate_input(kwargs[name])", "of a set.\"\"\" @abstractmethod def __repr__(self): \"\"\"Produce a string with a verbal description", "random points on the PointSet. Parameters ---------- n_samples : int Number of samples.", "for Stratified Spaces. Lead authors: <NAME> & <NAME> \"\"\" import functools import itertools", "points of a set.\"\"\" @abstractmethod def __repr__(self): \"\"\"Produce a string with a verbal", "Points, shape=[...] Point in the PointSet. Returns ------- path : callable Time parameterized", "in kwargs: kwargs[name] = manipulate_input(kwargs[name]) else: args[pos] = manipulate_input(args[pos]) return func(*args, **kwargs) return", "@abstractmethod def set_to_array(self, points): \"\"\"Convert a set of points into an array. Parameters", "{\\'vector\\', \\'matrix\\', \\'Point\\' } Point type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\',", "points on the PointSet. Parameters ---------- n_samples : int Number of samples. Optional,", "A tuple for each position. Notes ----- Explicitly defining args_positions and args names", "lists. \"\"\" n_a = len(list_a) n_b = len(list_b) if n_a == n_b: return", "the case. Parameters ---------- args_positions : tuple Position and corresponding argument name. A", "verbal description of the point.\"\"\" @abstractmethod def __hash__(self): \"\"\"Define a hash for the", "for each position. Notes ----- Explicitly defining args_positions and args names ensures it", "---------- n_samples : int Number of samples. Optional, default: 1. Returns ------- samples", "__hash__(self): \"\"\"Define a hash for the point.\"\"\" @abstractmethod def to_array(self): \"\"\"Turn the point", "Parameters ---------- param: int Parameter defining the pointset. 
default_point_type : str, {\\'vector\\', \\'matrix\\',", "Exception(f\"Cannot broadcast lens {n_a} and {n_b}\") def _manipulate_input(arg): if not (type(arg) in [list,", "len(list_b) if n_a == n_b: return list_a, list_b if n_a == 1: return", "PointSet. point_b: Point or List of Point, shape=[...] Point in the PointSet. Returns", "array-like, shape=[...] Points sampled on the PointSet. \"\"\" class PointSetMetric(ABC): r\"\"\"Class for the", "\"\"\" import functools import itertools from abc import ABC, abstractmethod def broadcast_lists(list_a, list_b):", "list of Point, shape=[...] Number of samples of point type to turn into", "in the PointSet. Returns ------- path : callable Time parameterized geodesic curve. \"\"\"", "return _dec class Point(ABC): r\"\"\"Class for points of a set.\"\"\" @abstractmethod def __repr__(self):", "set_to_array(self, points): \"\"\"Convert a set of points into an array. Parameters ---------- points", "for the lenght spaces. Parameters ---------- Set : PointSet Underling PointSet. default_point_type :", "lists. Similar behavior as ``gs.broadcast_arrays``, but for lists. \"\"\" n_a = len(list_a) n_b", "point into a numpy array. Returns ------- array_point : array-like, shape=[...] An array", "== 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast lens {n_a} and {n_b}\")", "if point belongs to the set. \"\"\" @abstractmethod def random_point(self, n_samples=1): r\"\"\"Sample random", "etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" @abstractmethod def belongs(self, point, atol): r\"\"\"Evaluate", "tuple]): return [arg] return arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point type and transform", "Optional, default: \\'intrinsic\\'. \"\"\" @abstractmethod def belongs(self, point, atol): r\"\"\"Evaluate if a point", "r\"\"\"Evaluate if a point belongs to the set. 
Parameters ---------- point : Point-like,", ": float Absolute tolerance. Optional, default: backend atol. Returns ------- belongs : array-like,", "type. Optional, default: \\'intrinsic\\'. \"\"\" def __init__(self, space: PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space", "array. Parameters ---------- points : list of Point, shape=[...] Number of samples of", "the point.\"\"\" @abstractmethod def to_array(self): \"\"\"Turn the point into a numpy array. Returns", "default: 1. Returns ------- samples : List of Point Points sampled on the", "Returns ------- samples : List of Point Points sampled on the PointSet. \"\"\"", "Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" @abstractmethod def belongs(self, point, atol): r\"\"\"Evaluate if", "to the set. Parameters ---------- point : Point-like, shape=[...] Point to evaluate. atol", "Parameters ---------- n_samples : int Number of samples. Optional, default: 1. Returns -------", "str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" def __init__(self, space:", "---------- param: int Parameter defining the pointset. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\'}", "it works for all combinations of input calling. \"\"\" def _dec(func): @functools.wraps(func) def", "the lenght spaces. Parameters ---------- Set : PointSet Underling PointSet. default_point_type : str,", "Position and corresponding argument name. A tuple for each position. Notes ----- Explicitly", "names ensures it works for all combinations of input calling. \"\"\" def _dec(func):", "of the point.\"\"\" @abstractmethod def __hash__(self): \"\"\"Define a hash for the point.\"\"\" @abstractmethod", "Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" def __init__(self, space: PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs)", "\"\"\"Convert a set of points into an array. 
Parameters ---------- points : list", "a set of points into an array. Parameters ---------- points : list of", "1. Returns ------- samples : List of Point Points sampled on the PointSet.", "Returns ------- points_array : array-like, shape=[...] Points sampled on the PointSet. \"\"\" class", "List of Point Points sampled on the PointSet. \"\"\" @abstractmethod def set_to_array(self, points):", "Parameters ---------- initial_point: Point or List of Points, shape=[...] Point in the PointSet.", "def belongs(self, point, atol): r\"\"\"Evaluate if a point belongs to the set. Parameters", "points): \"\"\"Convert a set of points into an array. Parameters ---------- points :", "Point in the PointSet. point_b: Point or List of Point, shape=[...] Point in", "broadcast lens {n_a} and {n_b}\") def _manipulate_input(arg): if not (type(arg) in [list, tuple]):", "args names ensures it works for all combinations of input calling. \"\"\" def", "\\'matrix\\', \\'Point\\' } Point type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\',", "abc import ABC, abstractmethod def broadcast_lists(list_a, list_b): \"\"\"Broadcast two lists. Similar behavior as", "list_a, list_b if n_a == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if n_b ==", "shape=[...] Boolean evaluating if point belongs to the set. \"\"\" @abstractmethod def random_point(self,", "type. \"\"\" class PointSet(ABC): r\"\"\"Class for a set of points of type Point.", "\"\"\" n_a = len(list_a) n_b = len(list_b) if n_a == n_b: return list_a,", ": array-like, shape=[...] Distance. \"\"\" @abstractmethod def geodesic(self, initial_point, end_point, **kwargs): \"\"\"Compute the", "= len(list_b) if n_a == n_b: return list_a, list_b if n_a == 1:", "\\'intrinsic\\'. \"\"\" @abstractmethod def belongs(self, point, atol): r\"\"\"Evaluate if a point belongs to", "backend atol. Returns ------- belongs : array-like, shape=[...] 
Boolean evaluating if point belongs", "r\"\"\"Class for points of a set.\"\"\" @abstractmethod def __repr__(self): \"\"\"Produce a string with", "of points into an array. Parameters ---------- points : list of Point, shape=[...]", "atol): r\"\"\"Evaluate if a point belongs to the set. Parameters ---------- point :", "\\'Point\\'} Point type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate", "return _wrapped return _dec class Point(ABC): r\"\"\"Class for points of a set.\"\"\" @abstractmethod", "{n_b}\") def _manipulate_input(arg): if not (type(arg) in [list, tuple]): return [arg] return arg", "pos, name in args_positions: if name in kwargs: kwargs[name] = manipulate_input(kwargs[name]) else: args[pos]", "of Points, shape=[...] Point in the PointSet. Returns ------- path : callable Time", "---------- args_positions : tuple Position and corresponding argument name. A tuple for each", "in [list, tuple]): return [arg] return arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point type", "def _manipulate_input(arg): if not (type(arg) in [list, tuple]): return [arg] return arg def", "return arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point type and transform in iterable if", "Spaces. Lead authors: <NAME> & <NAME> \"\"\" import functools import itertools from abc", "shape=[...] Number of samples of point type to turn into an array. Returns", "if not (type(arg) in [list, tuple]): return [arg] return arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input):", "n_b = len(list_b) if n_a == n_b: return list_a, list_b if n_a ==", "\"\"\"Produce a string with a verbal description of the point.\"\"\" @abstractmethod def __hash__(self):", "point belongs to the set. \"\"\" @abstractmethod def random_point(self, n_samples=1): r\"\"\"Sample random points", "default: \\'intrinsic\\'. 
\"\"\" @abstractmethod def belongs(self, point, atol): r\"\"\"Evaluate if a point belongs", "the PointSet. end_point: Point or List of Points, shape=[...] Point in the PointSet.", "a hash for the point.\"\"\" @abstractmethod def to_array(self): \"\"\"Turn the point into a", "functools import itertools from abc import ABC, abstractmethod def broadcast_lists(list_a, list_b): \"\"\"Broadcast two", "combinations of input calling. \"\"\" def _dec(func): @functools.wraps(func) def _wrapped(*args, **kwargs): args =", "belongs(self, point, atol): r\"\"\"Evaluate if a point belongs to the set. Parameters ----------", "int Number of samples. Optional, default: 1. Returns ------- samples : List of", "Points, shape=[...] Point in the PointSet. end_point: Point or List of Points, shape=[...]", "of Point, shape=[...] Number of samples of point type to turn into an", "shape=[...] Points sampled on the PointSet. \"\"\" class PointSetMetric(ABC): r\"\"\"Class for the lenght", "an array. Returns ------- points_array : array-like, shape=[...] Points sampled on the PointSet.", "\"\"\" @abstractmethod def geodesic(self, initial_point, end_point, **kwargs): \"\"\"Compute the geodesic in the PointSet.", "kwargs: kwargs[name] = manipulate_input(kwargs[name]) else: args[pos] = manipulate_input(args[pos]) return func(*args, **kwargs) return _wrapped", "of samples. Optional, default: 1. Returns ------- samples : List of Point Points", ": List of Point Points sampled on the PointSet. \"\"\" @abstractmethod def set_to_array(self,", "if n_b == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast lens {n_a}", "broadcast_lists(list_a, list_b): \"\"\"Broadcast two lists. Similar behavior as ``gs.broadcast_arrays``, but for lists. \"\"\"", "``gs.broadcast_arrays``, but for lists. \"\"\" n_a = len(list_a) n_b = len(list_b) if n_a", "all combinations of input calling. 
\"\"\" def _dec(func): @functools.wraps(func) def _wrapped(*args, **kwargs): args", "self).__init__(**kwargs) self.space = space @abstractmethod def dist(self, point_a, point_b, **kwargs): \"\"\"Distance between two", "the PointSet. Returns ------- distance : array-like, shape=[...] Distance. \"\"\" @abstractmethod def geodesic(self,", "the PointSet. \"\"\" @abstractmethod def set_to_array(self, points): \"\"\"Convert a set of points into", "atol. Returns ------- belongs : array-like, shape=[...] Boolean evaluating if point belongs to", "n_samples : int Number of samples. Optional, default: 1. Returns ------- samples :", ": tuple Position and corresponding argument name. A tuple for each position. Notes", "---------- initial_point: Point or List of Points, shape=[...] Point in the PointSet. end_point:", "point belongs to the set. Parameters ---------- point : Point-like, shape=[...] Point to", "_wrapped return _dec class Point(ABC): r\"\"\"Class for points of a set.\"\"\" @abstractmethod def", "calling. \"\"\" def _dec(func): @functools.wraps(func) def _wrapped(*args, **kwargs): args = list(args) for pos,", "points_array : array-like, shape=[...] Points sampled on the PointSet. \"\"\" class PointSetMetric(ABC): r\"\"\"Class", "list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast lens {n_a} and {n_b}\") def _manipulate_input(arg): if not", "args[pos] = manipulate_input(args[pos]) return func(*args, **kwargs) return _wrapped return _dec class Point(ABC): r\"\"\"Class", "in the PointSet. end_point: Point or List of Points, shape=[...] Point in the", "between two points in the PointSet. Parameters ---------- point_a: Point or List of", "\\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" def __init__(self, space: PointSet, **kwargs):", "\"\"\"Distance between two points in the PointSet. Parameters ---------- point_a: Point or List", "and transform in iterable if not the case. 
Parameters ---------- args_positions : tuple", "if n_a == n_b: return list_a, list_b if n_a == 1: return itertools.zip_longest(list_a,", ": Point-like, shape=[...] Point to evaluate. atol : float Absolute tolerance. Optional, default:", "\"\"\" class PointSet(ABC): r\"\"\"Class for a set of points of type Point. Parameters", "PointSet. \"\"\" class PointSetMetric(ABC): r\"\"\"Class for the lenght spaces. Parameters ---------- Set :", "works for all combinations of input calling. \"\"\" def _dec(func): @functools.wraps(func) def _wrapped(*args,", "__repr__(self): \"\"\"Produce a string with a verbal description of the point.\"\"\" @abstractmethod def", "a point belongs to the set. Parameters ---------- point : Point-like, shape=[...] Point", "{\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" def __init__(self, space: PointSet,", "lens {n_a} and {n_b}\") def _manipulate_input(arg): if not (type(arg) in [list, tuple]): return", "@abstractmethod def __repr__(self): \"\"\"Produce a string with a verbal description of the point.\"\"\"", "a string with a verbal description of the point.\"\"\" @abstractmethod def __hash__(self): \"\"\"Define", "Number of samples. Optional, default: 1. Returns ------- samples : List of Point", "a verbal description of the point.\"\"\" @abstractmethod def __hash__(self): \"\"\"Define a hash for", "\\'Point\\' } Point type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc}", "default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\'} Point type. Optional, default: \\'Point\\'. default_coords_type :", "@abstractmethod def belongs(self, point, atol): r\"\"\"Evaluate if a point belongs to the set.", "pointset. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\'} Point type. Optional, default: \\'Point\\'. default_coords_type", "or List of Points, shape=[...] Point in the PointSet. end_point: Point or List", "but for lists. 
\"\"\" n_a = len(list_a) n_b = len(list_b) if n_a ==", "and args names ensures it works for all combinations of input calling. \"\"\"", "samples of point type to turn into an array. Returns ------- points_array :", "kwargs[name] = manipulate_input(kwargs[name]) else: args[pos] = manipulate_input(args[pos]) return func(*args, **kwargs) return _wrapped return", "Points sampled on the PointSet. \"\"\" @abstractmethod def set_to_array(self, points): \"\"\"Convert a set", "\"\"\"Define a hash for the point.\"\"\" @abstractmethod def to_array(self): \"\"\"Turn the point into", "samples. Optional, default: 1. Returns ------- samples : List of Point Points sampled", "on the PointSet. Parameters ---------- n_samples : int Number of samples. Optional, default:", "set of points of type Point. Parameters ---------- param: int Parameter defining the", "{\\'vector\\', \\'matrix\\', \\'Point\\'} Point type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\',", "for the point.\"\"\" @abstractmethod def to_array(self): \"\"\"Turn the point into a numpy array.", "PointSet. Parameters ---------- initial_point: Point or List of Points, shape=[...] Point in the", "corresponding argument name. A tuple for each position. Notes ----- Explicitly defining args_positions", "\"\"\"Compute the geodesic in the PointSet. Parameters ---------- initial_point: Point or List of", "Points sampled on the PointSet. \"\"\" class PointSetMetric(ABC): r\"\"\"Class for the lenght spaces.", "r\"\"\"Class for the lenght spaces. Parameters ---------- Set : PointSet Underling PointSet. default_point_type", "point, atol): r\"\"\"Evaluate if a point belongs to the set. Parameters ---------- point", "import ABC, abstractmethod def broadcast_lists(list_a, list_b): \"\"\"Broadcast two lists. 
Similar behavior as ``gs.broadcast_arrays``,", "else: args[pos] = manipulate_input(args[pos]) return func(*args, **kwargs) return _wrapped return _dec class Point(ABC):", "args_positions and args names ensures it works for all combinations of input calling.", "{\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" @abstractmethod def belongs(self, point,", ": str, {\\'vector\\', \\'matrix\\', \\'Point\\'} Point type. Optional, default: \\'Point\\'. default_coords_type : str,", "array. Returns ------- points_array : array-like, shape=[...] Points sampled on the PointSet. \"\"\"", "set.\"\"\" @abstractmethod def __repr__(self): \"\"\"Produce a string with a verbal description of the", "An array representation of the Point type. \"\"\" class PointSet(ABC): r\"\"\"Class for a", "\"\"\"Broadcast two lists. Similar behavior as ``gs.broadcast_arrays``, but for lists. \"\"\" n_a =", "\\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" @abstractmethod def belongs(self, point, atol):", "return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if n_b == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise", "PointSet(ABC): r\"\"\"Class for a set of points of type Point. Parameters ---------- param:", "List of Points, shape=[...] Point in the PointSet. end_point: Point or List of", ": list of Point, shape=[...] Number of samples of point type to turn", "into an array. Returns ------- points_array : array-like, shape=[...] Points sampled on the", "manipulate_input(args[pos]) return func(*args, **kwargs) return _wrapped return _dec class Point(ABC): r\"\"\"Class for points", "belongs : array-like, shape=[...] Boolean evaluating if point belongs to the set. \"\"\"", "on the PointSet. \"\"\" @abstractmethod def set_to_array(self, points): \"\"\"Convert a set of points", "------- array_point : array-like, shape=[...] An array representation of the Point type. 
\"\"\"", "def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point type and transform in iterable if not the", "Stratified Spaces. Lead authors: <NAME> & <NAME> \"\"\" import functools import itertools from", "the PointSet. point_b: Point or List of Point, shape=[...] Point in the PointSet.", "def dist(self, point_a, point_b, **kwargs): \"\"\"Distance between two points in the PointSet. Parameters", "Point, shape=[...] Number of samples of point type to turn into an array.", "end_point: Point or List of Points, shape=[...] Point in the PointSet. Returns -------", "default: \\'intrinsic\\'. \"\"\" def __init__(self, space: PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space = space", "point type and transform in iterable if not the case. Parameters ---------- args_positions", "name in args_positions: if name in kwargs: kwargs[name] = manipulate_input(kwargs[name]) else: args[pos] =", "class PointSet(ABC): r\"\"\"Class for a set of points of type Point. Parameters ----------", "\"\"\" @abstractmethod def random_point(self, n_samples=1): r\"\"\"Sample random points on the PointSet. Parameters ----------", "Distance. \"\"\" @abstractmethod def geodesic(self, initial_point, end_point, **kwargs): \"\"\"Compute the geodesic in the", "Similar behavior as ``gs.broadcast_arrays``, but for lists. \"\"\" n_a = len(list_a) n_b =", "geodesic in the PointSet. Parameters ---------- initial_point: Point or List of Points, shape=[...]", "points into an array. Parameters ---------- points : list of Point, shape=[...] Number", "Number of samples of point type to turn into an array. Returns -------", "Underling PointSet. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\' } Point type. Optional, default:", "shape=[...] Distance. \"\"\" @abstractmethod def geodesic(self, initial_point, end_point, **kwargs): \"\"\"Compute the geodesic in", "Point-like, shape=[...] Point to evaluate. 
atol : float Absolute tolerance. Optional, default: backend", "type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional,", "int Parameter defining the pointset. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\'} Point type.", "not the case. Parameters ---------- args_positions : tuple Position and corresponding argument name.", "float Absolute tolerance. Optional, default: backend atol. Returns ------- belongs : array-like, shape=[...]", "\\'intrinsic\\'. \"\"\" def __init__(self, space: PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space = space @abstractmethod", "Point, shape=[...] Point in the PointSet. point_b: Point or List of Point, shape=[...]", "Parameters ---------- point_a: Point or List of Point, shape=[...] Point in the PointSet.", "case. Parameters ---------- args_positions : tuple Position and corresponding argument name. A tuple", "to the set. \"\"\" @abstractmethod def random_point(self, n_samples=1): r\"\"\"Sample random points on the", "on the PointSet. \"\"\" class PointSetMetric(ABC): r\"\"\"Class for the lenght spaces. Parameters ----------", "Point in the PointSet. Returns ------- distance : array-like, shape=[...] Distance. \"\"\" @abstractmethod", "for pos, name in args_positions: if name in kwargs: kwargs[name] = manipulate_input(kwargs[name]) else:", "as ``gs.broadcast_arrays``, but for lists. \"\"\" n_a = len(list_a) n_b = len(list_b) if", "default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" def", "def _wrapped(*args, **kwargs): args = list(args) for pos, name in args_positions: if name", "space @abstractmethod def dist(self, point_a, point_b, **kwargs): \"\"\"Distance between two points in the", "in the PointSet. Parameters ---------- point_a: Point or List of Point, shape=[...] Point", "spaces. 
Parameters ---------- Set : PointSet Underling PointSet. default_point_type : str, {\\'vector\\', \\'matrix\\',", "itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast lens {n_a} and {n_b}\") def _manipulate_input(arg): if", "of input calling. \"\"\" def _dec(func): @functools.wraps(func) def _wrapped(*args, **kwargs): args = list(args)", "def __init__(self, space: PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space = space @abstractmethod def dist(self,", "Optional, default: 1. Returns ------- samples : List of Point Points sampled on", "def __repr__(self): \"\"\"Produce a string with a verbal description of the point.\"\"\" @abstractmethod", "initial_point, end_point, **kwargs): \"\"\"Compute the geodesic in the PointSet. Parameters ---------- initial_point: Point", "Point type. \"\"\" class PointSet(ABC): r\"\"\"Class for a set of points of type", "----- Explicitly defining args_positions and args names ensures it works for all combinations", "(type(arg) in [list, tuple]): return [arg] return arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point", "<reponame>shubhamtalbar96/geomstats \"\"\"Class for Stratified Spaces. Lead authors: <NAME> & <NAME> \"\"\" import functools", "array_point : array-like, shape=[...] An array representation of the Point type. \"\"\" class", "\"\"\" class PointSetMetric(ABC): r\"\"\"Class for the lenght spaces. Parameters ---------- Set : PointSet", "n_a == n_b: return list_a, list_b if n_a == 1: return itertools.zip_longest(list_a, list_b,", "**kwargs) return _wrapped return _dec class Point(ABC): r\"\"\"Class for points of a set.\"\"\"", "list_b, fillvalue=list_a[0]) if n_b == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast", "\\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. 
\"\"\"", ": array-like, shape=[...] An array representation of the Point type. \"\"\" class PointSet(ABC):", "not (type(arg) in [list, tuple]): return [arg] return arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check", "List of Point, shape=[...] Point in the PointSet. point_b: Point or List of", "Point(ABC): r\"\"\"Class for points of a set.\"\"\" @abstractmethod def __repr__(self): \"\"\"Produce a string", "default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'.", "ensures it works for all combinations of input calling. \"\"\" def _dec(func): @functools.wraps(func)", "iterable if not the case. Parameters ---------- args_positions : tuple Position and corresponding", "1: return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast lens {n_a} and {n_b}\") def", "point_b: Point or List of Point, shape=[...] Point in the PointSet. Returns -------", "r\"\"\"Class for a set of points of type Point. Parameters ---------- param: int", "**kwargs): args = list(args) for pos, name in args_positions: if name in kwargs:", "type. Optional, default: \\'intrinsic\\'. \"\"\" @abstractmethod def belongs(self, point, atol): r\"\"\"Evaluate if a", "@abstractmethod def to_array(self): \"\"\"Turn the point into a numpy array. Returns ------- array_point", "belongs to the set. Parameters ---------- point : Point-like, shape=[...] Point to evaluate.", "of type Point. Parameters ---------- param: int Parameter defining the pointset. default_point_type :", "atol : float Absolute tolerance. Optional, default: backend atol. Returns ------- belongs :", "shape=[...] Point in the PointSet. Returns ------- distance : array-like, shape=[...] Distance. \"\"\"", "the geodesic in the PointSet. Parameters ---------- initial_point: Point or List of Points,", "PointSet. 
default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\' } Point type. Optional, default: \\'Point\\'.", "Point or List of Points, shape=[...] Point in the PointSet. end_point: Point or", "Point, shape=[...] Point in the PointSet. Returns ------- distance : array-like, shape=[...] Distance.", "Boolean evaluating if point belongs to the set. \"\"\" @abstractmethod def random_point(self, n_samples=1):", "str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" @abstractmethod def belongs(self,", "point.\"\"\" @abstractmethod def to_array(self): \"\"\"Turn the point into a numpy array. Returns -------", "hash for the point.\"\"\" @abstractmethod def to_array(self): \"\"\"Turn the point into a numpy", "etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" def __init__(self, space: PointSet, **kwargs): super(PointSetMetric,", "<NAME> & <NAME> \"\"\" import functools import itertools from abc import ABC, abstractmethod", "tuple for each position. Notes ----- Explicitly defining args_positions and args names ensures", "args_positions: if name in kwargs: kwargs[name] = manipulate_input(kwargs[name]) else: args[pos] = manipulate_input(args[pos]) return", "str, {\\'vector\\', \\'matrix\\', \\'Point\\' } Point type. Optional, default: \\'Point\\'. default_coords_type : str,", "\"\"\" @abstractmethod def set_to_array(self, points): \"\"\"Convert a set of points into an array.", "def random_point(self, n_samples=1): r\"\"\"Sample random points on the PointSet. Parameters ---------- n_samples :", "\\'matrix\\', \\'Point\\'} Point type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc}", "for a set of points of type Point. Parameters ---------- param: int Parameter", "the PointSet. \"\"\" class PointSetMetric(ABC): r\"\"\"Class for the lenght spaces. 
Parameters ---------- Set", "\"\"\" def __init__(self, space: PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space = space @abstractmethod def", "behavior as ``gs.broadcast_arrays``, but for lists. \"\"\" n_a = len(list_a) n_b = len(list_b)", "Point Points sampled on the PointSet. \"\"\" @abstractmethod def set_to_array(self, points): \"\"\"Convert a", ": array-like, shape=[...] Boolean evaluating if point belongs to the set. \"\"\" @abstractmethod", "defining the pointset. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\'} Point type. Optional, default:", "Point to evaluate. atol : float Absolute tolerance. Optional, default: backend atol. Returns", "transform in iterable if not the case. Parameters ---------- args_positions : tuple Position", "Absolute tolerance. Optional, default: backend atol. Returns ------- belongs : array-like, shape=[...] Boolean", "fillvalue=list_a[0]) if n_b == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast lens", "default: backend atol. Returns ------- belongs : array-like, shape=[...] Boolean evaluating if point", "Point in the PointSet. end_point: Point or List of Points, shape=[...] Point in", "each position. Notes ----- Explicitly defining args_positions and args names ensures it works", "= space @abstractmethod def dist(self, point_a, point_b, **kwargs): \"\"\"Distance between two points in", "Point in the PointSet. Returns ------- path : callable Time parameterized geodesic curve.", "the set. Parameters ---------- point : Point-like, shape=[...] Point to evaluate. atol :", "array. Returns ------- array_point : array-like, shape=[...] An array representation of the Point", "raise Exception(f\"Cannot broadcast lens {n_a} and {n_b}\") def _manipulate_input(arg): if not (type(arg) in", "lenght spaces. Parameters ---------- Set : PointSet Underling PointSet. 
default_point_type : str, {\\'vector\\',", "_dec(func): @functools.wraps(func) def _wrapped(*args, **kwargs): args = list(args) for pos, name in args_positions:", "the set. \"\"\" @abstractmethod def random_point(self, n_samples=1): r\"\"\"Sample random points on the PointSet.", "dist(self, point_a, point_b, **kwargs): \"\"\"Distance between two points in the PointSet. Parameters ----------", "or List of Point, shape=[...] Point in the PointSet. point_b: Point or List", "import itertools from abc import ABC, abstractmethod def broadcast_lists(list_a, list_b): \"\"\"Broadcast two lists.", "of Point, shape=[...] Point in the PointSet. point_b: Point or List of Point,", "the pointset. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\'} Point type. Optional, default: \\'Point\\'.", "set. \"\"\" @abstractmethod def random_point(self, n_samples=1): r\"\"\"Sample random points on the PointSet. Parameters", "@abstractmethod def random_point(self, n_samples=1): r\"\"\"Sample random points on the PointSet. Parameters ---------- n_samples", "of the Point type. \"\"\" class PointSet(ABC): r\"\"\"Class for a set of points", "_dec class Point(ABC): r\"\"\"Class for points of a set.\"\"\" @abstractmethod def __repr__(self): \"\"\"Produce", "list_b if n_a == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if n_b == 1:", "array-like, shape=[...] Boolean evaluating if point belongs to the set. \"\"\" @abstractmethod def", "@abstractmethod def dist(self, point_a, point_b, **kwargs): \"\"\"Distance between two points in the PointSet.", "Parameter defining the pointset. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\'} Point type. Optional,", "n_b: return list_a, list_b if n_a == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if", "for lists. \"\"\" n_a = len(list_a) n_b = len(list_b) if n_a == n_b:", "of point type to turn into an array. 
Returns ------- points_array : array-like,", "---------- points : list of Point, shape=[...] Number of samples of point type", "evaluating if point belongs to the set. \"\"\" @abstractmethod def random_point(self, n_samples=1): r\"\"\"Sample", "@functools.wraps(func) def _wrapped(*args, **kwargs): args = list(args) for pos, name in args_positions: if", "in args_positions: if name in kwargs: kwargs[name] = manipulate_input(kwargs[name]) else: args[pos] = manipulate_input(args[pos])", "if a point belongs to the set. Parameters ---------- point : Point-like, shape=[...]", "n_a == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if n_b == 1: return itertools.zip_longest(list_a,", "if name in kwargs: kwargs[name] = manipulate_input(kwargs[name]) else: args[pos] = manipulate_input(args[pos]) return func(*args,", "Point. Parameters ---------- param: int Parameter defining the pointset. default_point_type : str, {\\'vector\\',", ": array-like, shape=[...] Points sampled on the PointSet. \"\"\" class PointSetMetric(ABC): r\"\"\"Class for", "point_a, point_b, **kwargs): \"\"\"Distance between two points in the PointSet. Parameters ---------- point_a:", "of points of type Point. Parameters ---------- param: int Parameter defining the pointset.", "def broadcast_lists(list_a, list_b): \"\"\"Broadcast two lists. Similar behavior as ``gs.broadcast_arrays``, but for lists.", "sampled on the PointSet. \"\"\" @abstractmethod def set_to_array(self, points): \"\"\"Convert a set of", "PointSet. end_point: Point or List of Points, shape=[...] Point in the PointSet. 
Returns", "@abstractmethod def __hash__(self): \"\"\"Define a hash for the point.\"\"\" @abstractmethod def to_array(self): \"\"\"Turn", "description of the point.\"\"\" @abstractmethod def __hash__(self): \"\"\"Define a hash for the point.\"\"\"", "= manipulate_input(kwargs[name]) else: args[pos] = manipulate_input(args[pos]) return func(*args, **kwargs) return _wrapped return _dec", "import functools import itertools from abc import ABC, abstractmethod def broadcast_lists(list_a, list_b): \"\"\"Broadcast", "of Point Points sampled on the PointSet. \"\"\" @abstractmethod def set_to_array(self, points): \"\"\"Convert", "and corresponding argument name. A tuple for each position. Notes ----- Explicitly defining", "PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space = space @abstractmethod def dist(self, point_a, point_b, **kwargs):", "point_a: Point or List of Point, shape=[...] Point in the PointSet. point_b: Point", "func(*args, **kwargs) return _wrapped return _dec class Point(ABC): r\"\"\"Class for points of a", "n_samples=1): r\"\"\"Sample random points on the PointSet. Parameters ---------- n_samples : int Number", "Set : PointSet Underling PointSet. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\' } Point", "PointSet. Parameters ---------- point_a: Point or List of Point, shape=[...] Point in the", "r\"\"\"Sample random points on the PointSet. Parameters ---------- n_samples : int Number of", "ABC, abstractmethod def broadcast_lists(list_a, list_b): \"\"\"Broadcast two lists. Similar behavior as ``gs.broadcast_arrays``, but", "fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast lens {n_a} and {n_b}\") def _manipulate_input(arg): if not (type(arg)", "an array. Parameters ---------- points : list of Point, shape=[...] Number of samples", "position. Notes ----- Explicitly defining args_positions and args names ensures it works for", "shape=[...] Point in the PointSet. 
end_point: Point or List of Points, shape=[...] Point", "of samples of point type to turn into an array. Returns ------- points_array", "representation of the Point type. \"\"\" class PointSet(ABC): r\"\"\"Class for a set of", "or List of Point, shape=[...] Point in the PointSet. Returns ------- distance :", "in the PointSet. point_b: Point or List of Point, shape=[...] Point in the", "two lists. Similar behavior as ``gs.broadcast_arrays``, but for lists. \"\"\" n_a = len(list_a)", "Point type. Optional, default: \\'Point\\'. default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type.", "turn into an array. Returns ------- points_array : array-like, shape=[...] Points sampled on", "in the PointSet. Returns ------- distance : array-like, shape=[...] Distance. \"\"\" @abstractmethod def", "Parameters ---------- point : Point-like, shape=[...] Point to evaluate. atol : float Absolute", "argument name. A tuple for each position. Notes ----- Explicitly defining args_positions and", "authors: <NAME> & <NAME> \"\"\" import functools import itertools from abc import ABC,", "PointSet Underling PointSet. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\' } Point type. Optional,", "two points in the PointSet. Parameters ---------- point_a: Point or List of Point,", "---------- point_a: Point or List of Point, shape=[...] Point in the PointSet. point_b:", "PointSet. Parameters ---------- n_samples : int Number of samples. Optional, default: 1. Returns", "default_coords_type : str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" @abstractmethod", "a set of points of type Point. Parameters ---------- param: int Parameter defining", "evaluate. atol : float Absolute tolerance. Optional, default: backend atol. Returns ------- belongs", "for all combinations of input calling. \"\"\" def _dec(func): @functools.wraps(func) def _wrapped(*args, **kwargs):", "distance : array-like, shape=[...] 
Distance. \"\"\" @abstractmethod def geodesic(self, initial_point, end_point, **kwargs): \"\"\"Compute", "point : Point-like, shape=[...] Point to evaluate. atol : float Absolute tolerance. Optional,", "& <NAME> \"\"\" import functools import itertools from abc import ABC, abstractmethod def", "PointSetMetric(ABC): r\"\"\"Class for the lenght spaces. Parameters ---------- Set : PointSet Underling PointSet.", "points : list of Point, shape=[...] Number of samples of point type to", "abstractmethod def broadcast_lists(list_a, list_b): \"\"\"Broadcast two lists. Similar behavior as ``gs.broadcast_arrays``, but for", "shape=[...] Point in the PointSet. point_b: Point or List of Point, shape=[...] Point", "type and transform in iterable if not the case. Parameters ---------- args_positions :", "{n_a} and {n_b}\") def _manipulate_input(arg): if not (type(arg) in [list, tuple]): return [arg]", "Parameters ---------- args_positions : tuple Position and corresponding argument name. A tuple for", "points in the PointSet. Parameters ---------- point_a: Point or List of Point, shape=[...]", "samples : List of Point Points sampled on the PointSet. \"\"\" @abstractmethod def", "if n_a == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0]) if n_b == 1: return", "\"\"\"Turn the point into a numpy array. Returns ------- array_point : array-like, shape=[...]", "== n_b: return list_a, list_b if n_a == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0])", "---------- Set : PointSet Underling PointSet. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\' }", "Parameters ---------- Set : PointSet Underling PointSet. default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\'", "input calling. \"\"\" def _dec(func): @functools.wraps(func) def _wrapped(*args, **kwargs): args = list(args) for", "PointSet. Returns ------- distance : array-like, shape=[...] Distance. 
\"\"\" @abstractmethod def geodesic(self, initial_point,", "the PointSet. Parameters ---------- initial_point: Point or List of Points, shape=[...] Point in", "set of points into an array. Parameters ---------- points : list of Point,", "args = list(args) for pos, name in args_positions: if name in kwargs: kwargs[name]", "sampled on the PointSet. \"\"\" class PointSetMetric(ABC): r\"\"\"Class for the lenght spaces. Parameters", "------- samples : List of Point Points sampled on the PointSet. \"\"\" @abstractmethod", "a numpy array. Returns ------- array_point : array-like, shape=[...] An array representation of", "n_b == 1: return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0]) raise Exception(f\"Cannot broadcast lens {n_a} and", "__init__(self, space: PointSet, **kwargs): super(PointSetMetric, self).__init__(**kwargs) self.space = space @abstractmethod def dist(self, point_a,", "geodesic(self, initial_point, end_point, **kwargs): \"\"\"Compute the geodesic in the PointSet. Parameters ---------- initial_point:", "n_a = len(list_a) n_b = len(list_b) if n_a == n_b: return list_a, list_b", "if not the case. Parameters ---------- args_positions : tuple Position and corresponding argument", "Returns ------- distance : array-like, shape=[...] Distance. \"\"\" @abstractmethod def geodesic(self, initial_point, end_point,", "default_point_type : str, {\\'vector\\', \\'matrix\\', \\'Point\\' } Point type. Optional, default: \\'Point\\'. default_coords_type", "class Point(ABC): r\"\"\"Class for points of a set.\"\"\" @abstractmethod def __repr__(self): \"\"\"Produce a", "array representation of the Point type. \"\"\" class PointSet(ABC): r\"\"\"Class for a set", "= manipulate_input(args[pos]) return func(*args, **kwargs) return _wrapped return _dec class Point(ABC): r\"\"\"Class for", "return [arg] return arg def _vectorize_point(*args_positions, manipulate_input=_manipulate_input): \"\"\"Check point type and transform in", "to evaluate. 
atol : float Absolute tolerance. Optional, default: backend atol. Returns -------", "point type to turn into an array. Returns ------- points_array : array-like, shape=[...]", ": str, {\\'intrinsic\\', \\'extrinsic\\', etc} Coordinate type. Optional, default: \\'intrinsic\\'. \"\"\" @abstractmethod def" ]
[ "instead. If SSL is needed, an upgrade to Python 2.6 on the server-side", "print('''error: Server does not support SMTP-over-SSL. You could use STARTTLS instead. If SSL", "Created on Mar 18, 2016 @author: fky ''' import smtplib if not 'SMTP_SSL'", "'SMTP_SSL' in smtplib.__all__: print('''error: Server does not support SMTP-over-SSL. You could use STARTTLS", "<reponame>Codefans-fan/p2pSpider # -*- coding: utf-8 -*- ''' Created on Mar 18, 2016 @author:", "smtplib if not 'SMTP_SSL' in smtplib.__all__: print('''error: Server does not support SMTP-over-SSL. You", "Server does not support SMTP-over-SSL. You could use STARTTLS instead. If SSL is", "''' Created on Mar 18, 2016 @author: fky ''' import smtplib if not", "STARTTLS instead. If SSL is needed, an upgrade to Python 2.6 on the", "does not support SMTP-over-SSL. You could use STARTTLS instead. If SSL is needed,", "-*- coding: utf-8 -*- ''' Created on Mar 18, 2016 @author: fky '''", "use STARTTLS instead. If SSL is needed, an upgrade to Python 2.6 on", "support SMTP-over-SSL. You could use STARTTLS instead. If SSL is needed, an upgrade", "You could use STARTTLS instead. If SSL is needed, an upgrade to Python", "2016 @author: fky ''' import smtplib if not 'SMTP_SSL' in smtplib.__all__: print('''error: Server", "if not 'SMTP_SSL' in smtplib.__all__: print('''error: Server does not support SMTP-over-SSL. You could", "fky ''' import smtplib if not 'SMTP_SSL' in smtplib.__all__: print('''error: Server does not", "could use STARTTLS instead. If SSL is needed, an upgrade to Python 2.6", "''' import smtplib if not 'SMTP_SSL' in smtplib.__all__: print('''error: Server does not support", "in smtplib.__all__: print('''error: Server does not support SMTP-over-SSL. 
You could use STARTTLS instead.", "needed, an upgrade to Python 2.6 on the server-side should do the trick.''')", "SSL is needed, an upgrade to Python 2.6 on the server-side should do", "Mar 18, 2016 @author: fky ''' import smtplib if not 'SMTP_SSL' in smtplib.__all__:", "not support SMTP-over-SSL. You could use STARTTLS instead. If SSL is needed, an", "on Mar 18, 2016 @author: fky ''' import smtplib if not 'SMTP_SSL' in", "@author: fky ''' import smtplib if not 'SMTP_SSL' in smtplib.__all__: print('''error: Server does", "coding: utf-8 -*- ''' Created on Mar 18, 2016 @author: fky ''' import", "smtplib.__all__: print('''error: Server does not support SMTP-over-SSL. You could use STARTTLS instead. If", "# -*- coding: utf-8 -*- ''' Created on Mar 18, 2016 @author: fky", "If SSL is needed, an upgrade to Python 2.6 on the server-side should", "an upgrade to Python 2.6 on the server-side should do the trick.''') print(smtplib.__all__)", "is needed, an upgrade to Python 2.6 on the server-side should do the", "not 'SMTP_SSL' in smtplib.__all__: print('''error: Server does not support SMTP-over-SSL. You could use", "SMTP-over-SSL. You could use STARTTLS instead. If SSL is needed, an upgrade to", "utf-8 -*- ''' Created on Mar 18, 2016 @author: fky ''' import smtplib", "18, 2016 @author: fky ''' import smtplib if not 'SMTP_SSL' in smtplib.__all__: print('''error:", "import smtplib if not 'SMTP_SSL' in smtplib.__all__: print('''error: Server does not support SMTP-over-SSL.", "-*- ''' Created on Mar 18, 2016 @author: fky ''' import smtplib if" ]
[ ".recognizer_base import FaceRecognizer from deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME =", "roi in rois: if roi.shape[0] != self.input_hw[0] or roi.shape[1] != self.input_hw[1]: new_roi =", "else: db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path = os.path.join(dir_path, db_path) with open(db_path, 'rb') as", "4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename = 'weight.mat' filepath = os.path.join(dir_path, filename) if", "db_name, db_feature in self.db.items(): similarity = feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity)) scores.sort(key=lambda x: x[1],", "input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image') # read layer info layers = data['layers'] current", "= 'VALID' else: padding = 'SAME' stride = layer[0]['stride'][0][0] kernel, bias = layer[0]['weights'][0][0]", "stride[0], stride[0], 1), padding='SAME') elif layer_type == 'softmax': current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)]))", "new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) # new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: #", "'recognizer_vgg' def __init__(self, custom_db=None): self.batch_size = 4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename =", "'conv': if name[:2] == 'fc': padding = 'VALID' else: padding = 'SAME' stride", "self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32, shape=(None,", "0, 2, 3)) bias = np.squeeze(bias).reshape(-1) conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0],", "custom_db: db_path = custom_db else: db_path = 
DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path = os.path.join(dir_path, db_path)", "probs.append(prob) feats.append(feat) probs = np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)] return probs, feats def detect(self,", "rois: if roi.shape[0] != self.input_hw[0] or roi.shape[1] != self.input_hw[1]: new_roi = cv2.resize(roi, self.input_hw,", "np.vstack(feats)[:len(rois)] return probs, feats def detect(self, npimg, rois=None, faces=None): probs, feats = self.extract_features(npimg=npimg,", "import sys import cv2 import numpy as np import tensorflow as tf from", "'fc': padding = 'VALID' else: padding = 'SAME' stride = layer[0]['stride'][0][0] kernel, bias", "self.input_node: roi_chunk }) feat = [np.squeeze(x) for x in feat] probs.append(prob) feats.append(feat) probs", "= faces_to_rois(npimg=npimg, faces=faces) new_rois = [] if len(rois) > 0: new_rois = self.get_new_rois(rois=rois)", "elif layer_type == 'pool': stride = layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0] current = tf.nn.max_pool(current,", "elif layer_type == 'relu': current = tf.nn.relu(current) elif layer_type == 'pool': stride =", "with open(db_path, 'rb') as f: self.db = pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={", "tf.subtract(self.input_node, self.average_image, name='normalized_image') # read layer info layers = data['layers'] current = input_norm", "[] feats = [] if not rois and faces: rois = faces_to_rois(npimg=npimg, faces=faces)", "feat = [np.squeeze(x) for x in feat] probs.append(prob) feats.append(feat) probs = np.vstack(probs)[:len(rois)] feats", "self.batch_size = 4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename = 'weight.mat' filepath = os.path.join(dir_path,", "self.input_hw[1], 3), dtype=np.uint8)): prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: roi_chunk }) feat", "224, 3), 
dtype=np.uint8) }) def name(self): return FaceRecognizerVGG.NAME def get_new_rois(self, rois): new_rois =", "faces=None): probs, feats = self.extract_features(npimg=npimg, rois=rois, faces=faces) if self.db is None: names =", "bias) elif layer_type == 'relu': current = tf.nn.relu(current) elif layer_type == 'pool': stride", "<gh_stars>1-10 import abc import os import sys import cv2 import numpy as np", "self.class_names = [str(x[0][0]) for x in classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image') #", "def __init__(self, custom_db=None): self.batch_size = 4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename = 'weight.mat'", "bias = layer[0]['weights'][0][0] # kernel = np.transpose(kernel, (1, 0, 2, 3)) bias =", "DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path = os.path.join(dir_path, db_path) with open(db_path, 'rb') as f: self.db =", "3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image') self.class_names", "= pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8)", "db_path = custom_db else: db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path = os.path.join(dir_path, db_path) with", "current = tf.nn.bias_add(conv, bias) elif layer_type == 'relu': current = tf.nn.relu(current) elif layer_type", "db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path = os.path.join(dir_path, db_path) with open(db_path, 'rb') as f:", "self.network['fc7']], feed_dict={ self.input_node: roi_chunk }) feat = [np.squeeze(x) for x in feat] probs.append(prob)", "layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0] if layer_type == 'conv': if name[:2] == 'fc': 
padding", "= input_norm network = {} for layer in layers[0]: name = layer[0]['name'][0][0] layer_type", "faces: rois = faces_to_rois(npimg=npimg, faces=faces) new_rois = [] if len(rois) > 0: new_rois", "if roi.shape[0] != self.input_hw[0] or roi.shape[1] != self.input_hw[1]: new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA)", "__init__(self, custom_db=None): self.batch_size = 4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename = 'weight.mat' filepath", "None: names = [[(self.class_names[idx], prop[idx]) for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs]", "= tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph, config=config) self.db = None if", "FileNotFoundError('Weight file not found, path=%s' % filepath) data = loadmat(filepath) # read meta", "name[:2] == 'fc': padding = 'VALID' else: padding = 'SAME' stride = layer[0]['stride'][0][0]", "= tf.Session(graph=self.graph, config=config) self.db = None if custom_db: db_path = custom_db else: db_path", "= meta['normalization'] self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node =", "found, path=%s' % filepath) data = loadmat(filepath) # read meta info meta =", "x: x[1], reverse=True) names.append(scores) return { 'output': probs, 'feature': feats, 'name': names }", "cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois def extract_features(self, rois=None, npimg=None, faces=None): probs = []", "if not os.path.exists(filepath): raise FileNotFoundError('Weight file not found, path=%s' % filepath) data =", "class FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg' def __init__(self, custom_db=None): self.batch_size = 4 dir_path =", "return { 'output': probs, 
'feature': feats, 'name': names } def get_threshold(self): return DeepFaceConfs.get()['recognizer']['vgg']['score_th']", "raise FileNotFoundError('Weight file not found, path=%s' % filepath) data = loadmat(filepath) # read", "faces=faces) if self.db is None: names = [[(self.class_names[idx], prop[idx]) for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]]", "'rb') as f: self.db = pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size,", "similarity = feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity)) scores.sort(key=lambda x: x[1], reverse=True) names.append(scores) return {", "data = loadmat(filepath) # read meta info meta = data['meta'] classes = meta['classes']", "new_rois = [] for roi in rois: if roi.shape[0] != self.input_hw[0] or roi.shape[1]", "feat in feats: scores = [] for db_name, db_feature in self.db.items(): similarity =", "stride = layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0] current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1),", "= layer[0]['pool'][0][0] current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0], stride[0], 1),", "# TODO names = [] for feat in feats: scores = [] for", "loadmat import pickle from deepface.confs.conf import DeepFaceConfs from .recognizer_base import FaceRecognizer from deepface.utils.common", "custom_db else: db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path = os.path.join(dir_path, db_path) with open(db_path, 'rb')", "FaceRecognizerVGG.NAME def get_new_rois(self, rois): new_rois = [] for roi in rois: if roi.shape[0]", "# read meta info meta = data['meta'] classes = meta['classes'] normalization = meta['normalization']", "tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph, config=config) self.db = None if custom_db: db_path = custom_db", 
"feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg' def __init__(self, custom_db=None): self.batch_size = 4 dir_path", "cv2 import numpy as np import tensorflow as tf from scipy.io import loadmat", "kernel = np.transpose(kernel, (1, 0, 2, 3)) bias = np.squeeze(bias).reshape(-1) conv = tf.nn.conv2d(current,", "read layer info layers = data['layers'] current = input_norm network = {} for", "os.path.join(dir_path, filename) if not os.path.exists(filepath): raise FileNotFoundError('Weight file not found, path=%s' % filepath)", "'pool': stride = layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0] current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1],", "[np.squeeze(x) for x in feat] probs.append(prob) feats.append(feat) probs = np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)]", "self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image') self.class_names = [str(x[0][0]) for x", "= feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity)) scores.sort(key=lambda x: x[1], reverse=True) names.append(scores) return { 'output':", "NAME = 'recognizer_vgg' def __init__(self, custom_db=None): self.batch_size = 4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface')", "= np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)] return probs, feats def detect(self, npimg, rois=None, faces=None):", "!= self.input_hw[1]: new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) # new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi)", "in classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image') # read layer info layers =", "db_feature in self.db.items(): similarity = feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity)) scores.sort(key=lambda x: x[1], reverse=True)", "[] for feat in feats: scores = 
[] for db_name, db_feature in self.db.items():", "= os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename = 'weight.mat' filepath = os.path.join(dir_path, filename) if not os.path.exists(filepath):", "os.path.join(dir_path, db_path) with open(db_path, 'rb') as f: self.db = pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'],", "self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8) }) def name(self): return FaceRecognizerVGG.NAME def get_new_rois(self,", "faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg' def __init__(self, custom_db=None): self.batch_size = 4", "'softmax': current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name] = current self.network = network self.graph", "numpy as np import tensorflow as tf from scipy.io import loadmat import pickle", "for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs] else: # TODO names =", "db_path) with open(db_path, 'rb') as f: self.db = pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']],", "self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8) }) def name(self): return FaceRecognizerVGG.NAME", "}) def name(self): return FaceRecognizerVGG.NAME def get_new_rois(self, rois): new_rois = [] for roi", "in layers[0]: name = layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0] if layer_type == 'conv': if", "loadmat(filepath) # read meta info meta = data['meta'] classes = meta['classes'] normalization =", "from scipy.io import loadmat import pickle from deepface.confs.conf import DeepFaceConfs from .recognizer_base import", "'weight.mat' filepath = os.path.join(dir_path, filename) if not os.path.exists(filepath): raise FileNotFoundError('Weight file not found,", "= cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) # new_roi = 
cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: # roi", "feats def detect(self, npimg, rois=None, faces=None): probs, feats = self.extract_features(npimg=npimg, rois=rois, faces=faces) if", "stride[0], stride[0], 1), padding=padding) current = tf.nn.bias_add(conv, bias) elif layer_type == 'relu': current", "= layer[0]['stride'][0][0] kernel, bias = layer[0]['weights'][0][0] # kernel = np.transpose(kernel, (1, 0, 2,", "in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs] else: # TODO names = [] for", "= cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: # roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois", "= tf.subtract(self.input_node, self.average_image, name='normalized_image') # read layer info layers = data['layers'] current =", "== 'pool': stride = layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0] current = tf.nn.max_pool(current, ksize=(1, pool[0],", "[] if not rois and faces: rois = faces_to_rois(npimg=npimg, faces=faces) new_rois = []", "= [] feats = [] if not rois and faces: rois = faces_to_rois(npimg=npimg,", "cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois def extract_features(self, rois=None, npimg=None, faces=None): probs = [] feats", "self.average_image, name='normalized_image') # read layer info layers = data['layers'] current = input_norm network", "grouper, faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg' def __init__(self, custom_db=None): self.batch_size =", "feats.append(feat) probs = np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)] return probs, feats def detect(self, npimg,", "tensorflow as tf from scipy.io import loadmat import pickle from deepface.confs.conf import DeepFaceConfs", "= [] for roi in rois: if roi.shape[0] != self.input_hw[0] or roi.shape[1] !=", "feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity)) 
scores.sort(key=lambda x: x[1], reverse=True) names.append(scores) return { 'output': probs,", "layer_type == 'conv': if name[:2] == 'fc': padding = 'VALID' else: padding =", "conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1), padding=padding) current = tf.nn.bias_add(conv, bias)", "new_rois.append(roi) return new_rois def extract_features(self, rois=None, npimg=None, faces=None): probs = [] feats =", "for x in classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image') # read layer info", "dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename = 'weight.mat' filepath = os.path.join(dir_path, filename) if not", "grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)): prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node:", "FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg' def __init__(self, custom_db=None): self.batch_size = 4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),", "= DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path = os.path.join(dir_path, db_path) with open(db_path, 'rb') as f: self.db", "in self.db.items(): similarity = feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity)) scores.sort(key=lambda x: x[1], reverse=True) names.append(scores)", "config=config) self.db = None if custom_db: db_path = custom_db else: db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db',", "1, 1, 3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3),", "config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph, config=config) self.db = None if custom_db: db_path", "= 
self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: roi_chunk }) feat = [np.squeeze(x) for x in", "self.db = pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size, 224, 224, 3),", "FaceRecognizer from deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg' def", "or roi.shape[1] != self.input_hw[1]: new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) # new_roi = cv2.cvtColor(new_roi,", "cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: # roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois def", "def detect(self, npimg, rois=None, faces=None): probs, feats = self.extract_features(npimg=npimg, rois=rois, faces=faces) if self.db", "rois=rois, faces=faces) if self.db is None: names = [[(self.class_names[idx], prop[idx]) for idx in", "def name(self): return FaceRecognizerVGG.NAME def get_new_rois(self, rois): new_rois = [] for roi in", "len(rois) > 0: new_rois = self.get_new_rois(rois=rois) for roi_chunk in grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1],", "scipy.io import loadmat import pickle from deepface.confs.conf import DeepFaceConfs from .recognizer_base import FaceRecognizer", "else: # TODO names = [] for feat in feats: scores = []", "not found, path=%s' % filepath) data = loadmat(filepath) # read meta info meta", "= {} for layer in layers[0]: name = layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0] if", "elif layer_type == 'softmax': current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name] = current self.network", "dtype=np.uint8) }) def name(self): return FaceRecognizerVGG.NAME def get_new_rois(self, rois): new_rois = [] for", "data['meta'] classes = meta['classes'] normalization = 
meta['normalization'] self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3)", "3), dtype=np.uint8) }) def name(self): return FaceRecognizerVGG.NAME def get_new_rois(self, rois): new_rois = []", "filename) if not os.path.exists(filepath): raise FileNotFoundError('Weight file not found, path=%s' % filepath) data", "np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)] return probs, feats def detect(self, npimg, rois=None, faces=None): probs,", "roi.shape[0] != self.input_hw[0] or roi.shape[1] != self.input_hw[1]: new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) #", "bias = np.squeeze(bias).reshape(-1) conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1), padding=padding) current", "network self.graph = tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph, config=config) self.db =", "abc import os import sys import cv2 import numpy as np import tensorflow", "np.transpose(kernel, (1, 0, 2, 3)) bias = np.squeeze(bias).reshape(-1) conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1,", "def get_new_rois(self, rois): new_rois = [] for roi in rois: if roi.shape[0] !=", "scores = [] for db_name, db_feature in self.db.items(): similarity = feat_distance_cosine(feat, db_feature) scores.append((db_name,", "in grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)): prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={", "tf.Session(graph=self.graph, config=config) self.db = None if custom_db: db_path = custom_db else: db_path =", "feed_dict={ self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8) }) def name(self): return FaceRecognizerVGG.NAME def", "scores.append((db_name, similarity)) scores.sort(key=lambda x: x[1], reverse=True) names.append(scores) 
return { 'output': probs, 'feature': feats,", "for layer in layers[0]: name = layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0] if layer_type ==", "= self.get_new_rois(rois=rois) for roi_chunk in grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)): prob, feat", "= layer[0]['type'][0][0] if layer_type == 'conv': if name[:2] == 'fc': padding = 'VALID'", "prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs] else: # TODO names = [] for feat", "rois and faces: rois = faces_to_rois(npimg=npimg, faces=faces) new_rois = [] if len(rois) >", "self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image') self.class_names =", "reverse=True) names.append(scores) return { 'output': probs, 'feature': feats, 'name': names } def get_threshold(self):", "self.graph = tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph, config=config) self.db = None", "self.db.items(): similarity = feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity)) scores.sort(key=lambda x: x[1], reverse=True) names.append(scores) return", "tf from scipy.io import loadmat import pickle from deepface.confs.conf import DeepFaceConfs from .recognizer_base", "import cv2 import numpy as np import tensorflow as tf from scipy.io import", "= data['meta'] classes = meta['classes'] normalization = meta['normalization'] self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1,", "name='image') self.class_names = [str(x[0][0]) for x in classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image')", "name(self): return FaceRecognizerVGG.NAME def get_new_rois(self, rois): new_rois = [] for 
roi in rois:", "if name[:2] == 'fc': padding = 'VALID' else: padding = 'SAME' stride =", "import os import sys import cv2 import numpy as np import tensorflow as", "tf.nn.bias_add(conv, bias) elif layer_type == 'relu': current = tf.nn.relu(current) elif layer_type == 'pool':", "get_new_rois(self, rois): new_rois = [] for roi in rois: if roi.shape[0] != self.input_hw[0]", "np import tensorflow as tf from scipy.io import loadmat import pickle from deepface.confs.conf", "x[1], reverse=True) names.append(scores) return { 'output': probs, 'feature': feats, 'name': names } def", "padding = 'VALID' else: padding = 'SAME' stride = layer[0]['stride'][0][0] kernel, bias =", "self.extract_features(npimg=npimg, rois=rois, faces=faces) if self.db is None: names = [[(self.class_names[idx], prop[idx]) for idx", "import pickle from deepface.confs.conf import DeepFaceConfs from .recognizer_base import FaceRecognizer from deepface.utils.common import", "fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)): prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: roi_chunk })", "if self.db is None: names = [[(self.class_names[idx], prop[idx]) for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for", "for roi in rois: if roi.shape[0] != self.input_hw[0] or roi.shape[1] != self.input_hw[1]: new_roi", "tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image') self.class_names = [str(x[0][0]) for", "feats = [] if not rois and faces: rois = faces_to_rois(npimg=npimg, faces=faces) new_rois", "self.input_hw[0] or roi.shape[1] != self.input_hw[1]: new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) # new_roi =", "% filepath) data = loadmat(filepath) # read meta info meta = data['meta'] classes", "roi_chunk in grouper(new_rois, self.batch_size, 
fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)): prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']],", "ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0], stride[0], 1), padding='SAME') elif layer_type == 'softmax':", "current = tf.nn.relu(current) elif layer_type == 'pool': stride = layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0]", "in feats: scores = [] for db_name, db_feature in self.db.items(): similarity = feat_distance_cosine(feat,", "'') db_path = os.path.join(dir_path, db_path) with open(db_path, 'rb') as f: self.db = pickle.load(f)", "self.input_hw[1], 3), name='image') self.class_names = [str(x[0][0]) for x in classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node,", "= np.transpose(kernel, (1, 0, 2, 3)) bias = np.squeeze(bias).reshape(-1) conv = tf.nn.conv2d(current, tf.constant(kernel),", "2, 3)) bias = np.squeeze(bias).reshape(-1) conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1),", "np.squeeze(bias).reshape(-1) conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1), padding=padding) current = tf.nn.bias_add(conv,", "x in feat] probs.append(prob) feats.append(feat) probs = np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)] return probs,", "new_rois def extract_features(self, rois=None, npimg=None, faces=None): probs = [] feats = [] if", "tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name] = current self.network = network self.graph = tf.get_default_graph() config", "is None: names = [[(self.class_names[idx], prop[idx]) for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in", "names.append(scores) return { 'output': probs, 'feature': feats, 'name': names } def get_threshold(self): return", "= data['layers'] current = input_norm network = {} for layer in layers[0]: name", 
"np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1],", "import DeepFaceConfs from .recognizer_base import FaceRecognizer from deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine class", "probs] else: # TODO names = [] for feat in feats: scores =", "= loadmat(filepath) # read meta info meta = data['meta'] classes = meta['classes'] normalization", "tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1), padding=padding) current = tf.nn.bias_add(conv, bias) elif layer_type", "layer[0]['pool'][0][0] current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0], stride[0], 1), padding='SAME')", "roi_chunk }) feat = [np.squeeze(x) for x in feat] probs.append(prob) feats.append(feat) probs =", "detect(self, npimg, rois=None, faces=None): probs, feats = self.extract_features(npimg=npimg, rois=rois, faces=faces) if self.db is", "= os.path.join(dir_path, filename) if not os.path.exists(filepath): raise FileNotFoundError('Weight file not found, path=%s' %", "as tf from scipy.io import loadmat import pickle from deepface.confs.conf import DeepFaceConfs from", "x in classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image') # read layer info layers", "padding='SAME') elif layer_type == 'softmax': current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name] = current", "layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0] current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0],", "filepath) data = loadmat(filepath) # read meta info meta = data['meta'] classes =", "= [] if len(rois) > 0: new_rois = self.get_new_rois(rois=rois) for roi_chunk in grouper(new_rois,", "import FaceRecognizer 
from deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg'", "npimg, rois=None, faces=None): probs, feats = self.extract_features(npimg=npimg, rois=rois, faces=faces) if self.db is None:", "info layers = data['layers'] current = input_norm network = {} for layer in", "and faces: rois = faces_to_rois(npimg=npimg, faces=faces) new_rois = [] if len(rois) > 0:", "not os.path.exists(filepath): raise FileNotFoundError('Weight file not found, path=%s' % filepath) data = loadmat(filepath)", "= network self.graph = tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph, config=config) self.db", "current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name] = current self.network = network self.graph =", "pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8) })", "layer_type == 'relu': current = tf.nn.relu(current) elif layer_type == 'pool': stride = layer[0]['stride'][0][0]", "layers[0]: name = layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0] if layer_type == 'conv': if name[:2]", "= tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image') self.class_names = [str(x[0][0])", "normalization = meta['normalization'] self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node", "self.network = network self.graph = tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph, config=config)", "len(self.class_names)])) 
network[name] = current self.network = network self.graph = tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))", "open(db_path, 'rb') as f: self.db = pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node:", "for roi_chunk in grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)): prob, feat = self.persistent_sess.run([self.network['prob'],", "import loadmat import pickle from deepface.confs.conf import DeepFaceConfs from .recognizer_base import FaceRecognizer from", "info meta = data['meta'] classes = meta['classes'] normalization = meta['normalization'] self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1,", "custom_db=None): self.batch_size = 4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename = 'weight.mat' filepath =", "shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image') self.class_names = [str(x[0][0]) for x in classes[0][0]['description'][0][0]] input_norm", "= [str(x[0][0]) for x in classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image') # read", "else: padding = 'SAME' stride = layer[0]['stride'][0][0] kernel, bias = layer[0]['weights'][0][0] # kernel", "meta['classes'] normalization = meta['normalization'] self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2])", "current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0], stride[0], 1), padding='SAME') elif", "# warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8) }) def", "(1, 0, 2, 3)) bias = np.squeeze(bias).reshape(-1) 
conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0],", "3)) bias = np.squeeze(bias).reshape(-1) conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1), padding=padding)", "name = layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0] if layer_type == 'conv': if name[:2] ==", "= meta['classes'] normalization = meta['normalization'] self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3) self.input_hw =", "== 'fc': padding = 'VALID' else: padding = 'SAME' stride = layer[0]['stride'][0][0] kernel,", "padding=padding) current = tf.nn.bias_add(conv, bias) elif layer_type == 'relu': current = tf.nn.relu(current) elif", "= np.squeeze(bias).reshape(-1) conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1), padding=padding) current =", "kernel, bias = layer[0]['weights'][0][0] # kernel = np.transpose(kernel, (1, 0, 2, 3)) bias", "feat] probs.append(prob) feats.append(feat) probs = np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)] return probs, feats def", "DeepFaceConfs from .recognizer_base import FaceRecognizer from deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer):", "== 'conv': if name[:2] == 'fc': padding = 'VALID' else: padding = 'SAME'", "network[name] = current self.network = network self.graph = tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess", "as np import tensorflow as tf from scipy.io import loadmat import pickle from", "= current self.network = network self.graph = tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess =", "probs = np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)] return probs, feats def detect(self, npimg, rois=None,", "classes = meta['classes'] normalization = meta['normalization'] 
self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3) self.input_hw", "from deepface.confs.conf import DeepFaceConfs from .recognizer_base import FaceRecognizer from deepface.utils.common import grouper, faces_to_rois,", "stride[0], 1), padding=padding) current = tf.nn.bias_add(conv, bias) elif layer_type == 'relu': current =", "self.persistent_sess = tf.Session(graph=self.graph, config=config) self.db = None if custom_db: db_path = custom_db else:", "self.get_new_rois(rois=rois) for roi_chunk in grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)): prob, feat =", "= 'weight.mat' filepath = os.path.join(dir_path, filename) if not os.path.exists(filepath): raise FileNotFoundError('Weight file not", "new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: # roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return", "idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs] else: # TODO names = []", "}) feat = [np.squeeze(x) for x in feat] probs.append(prob) feats.append(feat) probs = np.vstack(probs)[:len(rois)]", "tf.nn.relu(current) elif layer_type == 'pool': stride = layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0] current =", "= tf.nn.relu(current) elif layer_type == 'pool': stride = layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0] current", "for feat in feats: scores = [] for db_name, db_feature in self.db.items(): similarity", "classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image') # read layer info layers = data['layers']", "feats: scores = [] for db_name, db_feature in self.db.items(): similarity = feat_distance_cosine(feat, db_feature)", "= 4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename = 'weight.mat' filepath = os.path.join(dir_path, filename)", "warm-up 
self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8) }) def name(self):", "= [np.squeeze(x) for x in feat] probs.append(prob) feats.append(feat) probs = np.vstack(probs)[:len(rois)] feats =", "path=%s' % filepath) data = loadmat(filepath) # read meta info meta = data['meta']", "if layer_type == 'conv': if name[:2] == 'fc': padding = 'VALID' else: padding", "padding = 'SAME' stride = layer[0]['stride'][0][0] kernel, bias = layer[0]['weights'][0][0] # kernel =", "scores.sort(key=lambda x: x[1], reverse=True) names.append(scores) return { 'output': probs, 'feature': feats, 'name': names", "self.input_hw[0], self.input_hw[1], 3), name='image') self.class_names = [str(x[0][0]) for x in classes[0][0]['description'][0][0]] input_norm =", "stride = layer[0]['stride'][0][0] kernel, bias = layer[0]['weights'][0][0] # kernel = np.transpose(kernel, (1, 0,", "tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image') self.class_names = [str(x[0][0]) for x in classes[0][0]['description'][0][0]]", "rois=None, npimg=None, faces=None): probs = [] feats = [] if not rois and", "feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: roi_chunk }) feat = [np.squeeze(x) for x", "1), padding='SAME') elif layer_type == 'softmax': current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name] =", "self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8) }) def name(self): return", "[[(self.class_names[idx], prop[idx]) for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs] else: # TODO", "interpolation=cv2.INTER_AREA) # new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: # roi = cv2.cvtColor(roi, 
cv2.COLOR_BGR2RGB)", "network = {} for layer in layers[0]: name = layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0]", "{} for layer in layers[0]: name = layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0] if layer_type", "not rois and faces: rois = faces_to_rois(npimg=npimg, faces=faces) new_rois = [] if len(rois)", "layer_type == 'softmax': current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name] = current self.network =", "dtype=np.uint8)): prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: roi_chunk }) feat = [np.squeeze(x)", "= self.extract_features(npimg=npimg, rois=rois, faces=faces) if self.db is None: names = [[(self.class_names[idx], prop[idx]) for", "pickle from deepface.confs.conf import DeepFaceConfs from .recognizer_base import FaceRecognizer from deepface.utils.common import grouper,", "= [] for feat in feats: scores = [] for db_name, db_feature in", "db_feature) scores.append((db_name, similarity)) scores.sort(key=lambda x: x[1], reverse=True) names.append(scores) return { 'output': probs, 'feature':", "probs, feats def detect(self, npimg, rois=None, faces=None): probs, feats = self.extract_features(npimg=npimg, rois=rois, faces=faces)", "= [[(self.class_names[idx], prop[idx]) for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs] else: #", "= cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois def extract_features(self, rois=None, npimg=None, faces=None): probs =", "layer[0]['stride'][0][0] kernel, bias = layer[0]['weights'][0][0] # kernel = np.transpose(kernel, (1, 0, 2, 3))", "cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: # roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois def extract_features(self,", "= [] for db_name, db_feature in self.db.items(): similarity = feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity))", 
"strides=(1, stride[0], stride[0], 1), padding='SAME') elif layer_type == 'softmax': current = tf.nn.softmax(tf.reshape(current, [-1,", "faces_to_rois(npimg=npimg, faces=faces) new_rois = [] if len(rois) > 0: new_rois = self.get_new_rois(rois=rois) for", "= tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image') self.class_names = [str(x[0][0]) for x in", "feats = np.vstack(feats)[:len(rois)] return probs, feats def detect(self, npimg, rois=None, faces=None): probs, feats", "if custom_db: db_path = custom_db else: db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path = os.path.join(dir_path,", "np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8) }) def name(self): return FaceRecognizerVGG.NAME def get_new_rois(self, rois):", "from .recognizer_base import FaceRecognizer from deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME", "read meta info meta = data['meta'] classes = meta['classes'] normalization = meta['normalization'] self.average_image", "[str(x[0][0]) for x in classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image') # read layer", "file not found, path=%s' % filepath) data = loadmat(filepath) # read meta info", "1, 3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image')", "layer[0]['weights'][0][0] # kernel = np.transpose(kernel, (1, 0, 2, 3)) bias = np.squeeze(bias).reshape(-1) conv", "= tf.nn.bias_add(conv, bias) elif layer_type == 'relu': current = tf.nn.relu(current) elif layer_type ==", "names = [[(self.class_names[idx], prop[idx]) for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs] else:", "= tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1), 
padding=padding) current = tf.nn.bias_add(conv, bias) elif", "roi.shape[1] != self.input_hw[1]: new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) # new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB)", "# new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: # roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi)", "self.db is None: names = [[(self.class_names[idx], prop[idx]) for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop", "rois = faces_to_rois(npimg=npimg, faces=faces) new_rois = [] if len(rois) > 0: new_rois =", "for x in feat] probs.append(prob) feats.append(feat) probs = np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)] return", "1), padding=padding) current = tf.nn.bias_add(conv, bias) elif layer_type == 'relu': current = tf.nn.relu(current)", "'VALID' else: padding = 'SAME' stride = layer[0]['stride'][0][0] kernel, bias = layer[0]['weights'][0][0] #", "self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: roi_chunk }) feat = [np.squeeze(x) for x in feat]", "[] for db_name, db_feature in self.db.items(): similarity = feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity)) scores.sort(key=lambda", "npimg=None, faces=None): probs = [] feats = [] if not rois and faces:", "layer[0]['type'][0][0] if layer_type == 'conv': if name[:2] == 'fc': padding = 'VALID' else:", "!= self.input_hw[0] or roi.shape[1] != self.input_hw[1]: new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) # new_roi", "prop[idx]) for idx in prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs] else: # TODO names", "TODO names = [] for feat in feats: scores = [] for db_name,", "for db_name, db_feature in self.db.items(): similarity = feat_distance_cosine(feat, db_feature) scores.append((db_name, similarity)) scores.sort(key=lambda x:", "os.path.exists(filepath): raise 
FileNotFoundError('Weight file not found, path=%s' % filepath) data = loadmat(filepath) #", "= 'recognizer_vgg' def __init__(self, custom_db=None): self.batch_size = 4 dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename", "feats = self.extract_features(npimg=npimg, rois=rois, faces=faces) if self.db is None: names = [[(self.class_names[idx], prop[idx])", "rois): new_rois = [] for roi in rois: if roi.shape[0] != self.input_hw[0] or", "sys import cv2 import numpy as np import tensorflow as tf from scipy.io", "[] for roi in rois: if roi.shape[0] != self.input_hw[0] or roi.shape[1] != self.input_hw[1]:", "'vggface') filename = 'weight.mat' filepath = os.path.join(dir_path, filename) if not os.path.exists(filepath): raise FileNotFoundError('Weight", "= [] if not rois and faces: rois = faces_to_rois(npimg=npimg, faces=faces) new_rois =", "if not rois and faces: rois = faces_to_rois(npimg=npimg, faces=faces) new_rois = [] if", "= custom_db else: db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path = os.path.join(dir_path, db_path) with open(db_path,", "new_rois.append(new_roi) else: # roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois def extract_features(self, rois=None,", "strides=(1, stride[0], stride[0], 1), padding=padding) current = tf.nn.bias_add(conv, bias) elif layer_type == 'relu':", "0: new_rois = self.get_new_rois(rois=rois) for roi_chunk in grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)):", "similarity)) scores.sort(key=lambda x: x[1], reverse=True) names.append(scores) return { 'output': probs, 'feature': feats, 'name':", "tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0], stride[0], 1), padding='SAME') elif layer_type ==", "if len(rois) > 0: new_rois = self.get_new_rois(rois=rois) for roi_chunk in grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0],", 
"self.input_hw[1]: new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) # new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else:", "= os.path.join(dir_path, db_path) with open(db_path, 'rb') as f: self.db = pickle.load(f) # warm-up", "tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph, config=config) self.db = None if custom_db:", "from deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg' def __init__(self,", "as f: self.db = pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size, 224,", "'relu': current = tf.nn.relu(current) elif layer_type == 'pool': stride = layer[0]['stride'][0][0] pool =", "= layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0] if layer_type == 'conv': if name[:2] == 'fc':", "# roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois def extract_features(self, rois=None, npimg=None, faces=None):", "import abc import os import sys import cv2 import numpy as np import", "# read layer info layers = data['layers'] current = input_norm network = {}", "data['layers'] current = input_norm network = {} for layer in layers[0]: name =", "new_rois = [] if len(rois) > 0: new_rois = self.get_new_rois(rois=rois) for roi_chunk in", "self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)): prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: roi_chunk", "import grouper, faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg' def __init__(self, custom_db=None): self.batch_size", "= tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name] = current 
self.network = network self.graph = tf.get_default_graph()", "input_norm network = {} for layer in layers[0]: name = layer[0]['name'][0][0] layer_type =", "names = [] for feat in feats: scores = [] for db_name, db_feature", "def extract_features(self, rois=None, npimg=None, faces=None): probs = [] feats = [] if not", "for prop in probs] else: # TODO names = [] for feat in", "db_path = os.path.join(dir_path, db_path) with open(db_path, 'rb') as f: self.db = pickle.load(f) #", "else: # roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois def extract_features(self, rois=None, npimg=None,", "filename = 'weight.mat' filepath = os.path.join(dir_path, filename) if not os.path.exists(filepath): raise FileNotFoundError('Weight file", "in probs] else: # TODO names = [] for feat in feats: scores", "tf.constant(kernel), strides=(1, stride[0], stride[0], 1), padding=padding) current = tf.nn.bias_add(conv, bias) elif layer_type ==", "= tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0], stride[0], 1), padding='SAME') elif layer_type", "os import sys import cv2 import numpy as np import tensorflow as tf", "prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: roi_chunk }) feat = [np.squeeze(x) for", "f: self.db = pickle.load(f) # warm-up self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: np.zeros((self.batch_size, 224, 224,", "layer info layers = data['layers'] current = input_norm network = {} for layer", "return FaceRecognizerVGG.NAME def get_new_rois(self, rois): new_rois = [] for roi in rois: if", "in feat] probs.append(prob) feats.append(feat) probs = np.vstack(probs)[:len(rois)] feats = np.vstack(feats)[:len(rois)] return probs, feats", "= None if custom_db: db_path = custom_db else: db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path", "probs, feats = self.extract_features(npimg=npimg, 
rois=rois, faces=faces) if self.db is None: names = [[(self.class_names[idx],", "layers = data['layers'] current = input_norm network = {} for layer in layers[0]:", "deepface.confs.conf import DeepFaceConfs from .recognizer_base import FaceRecognizer from deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine", "self.input_hw, interpolation=cv2.INTER_AREA) # new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: # roi = cv2.cvtColor(roi,", "> 0: new_rois = self.get_new_rois(rois=rois) for roi_chunk in grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3),", "import numpy as np import tensorflow as tf from scipy.io import loadmat import", "meta['normalization'] self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32,", "faces=faces) new_rois = [] if len(rois) > 0: new_rois = self.get_new_rois(rois=rois) for roi_chunk", "= layer[0]['weights'][0][0] # kernel = np.transpose(kernel, (1, 0, 2, 3)) bias = np.squeeze(bias).reshape(-1)", "new_rois = self.get_new_rois(rois=rois) for roi_chunk in grouper(new_rois, self.batch_size, fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)): prob,", "= np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3) self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2]) self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0],", "stride[0], 1), padding='SAME') elif layer_type == 'softmax': current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name]", "layer in layers[0]: name = layer[0]['name'][0][0] layer_type = layer[0]['type'][0][0] if layer_type == 'conv':", "== 'relu': current = tf.nn.relu(current) elif layer_type == 'pool': stride = layer[0]['stride'][0][0] pool", 
"deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine class FaceRecognizerVGG(FaceRecognizer): NAME = 'recognizer_vgg' def __init__(self, custom_db=None):", "layer_type == 'pool': stride = layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0] current = tf.nn.max_pool(current, ksize=(1,", "faces=None): probs = [] feats = [] if not rois and faces: rois", "return new_rois def extract_features(self, rois=None, npimg=None, faces=None): probs = [] feats = []", "cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA) # new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB) new_rois.append(new_roi) else: # roi =", "self.db = None if custom_db: db_path = custom_db else: db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '')", "None if custom_db: db_path = custom_db else: db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '') db_path =", "layer_type = layer[0]['type'][0][0] if layer_type == 'conv': if name[:2] == 'fc': padding =", "probs = [] feats = [] if not rois and faces: rois =", "filepath = os.path.join(dir_path, filename) if not os.path.exists(filepath): raise FileNotFoundError('Weight file not found, path=%s'", "# kernel = np.transpose(kernel, (1, 0, 2, 3)) bias = np.squeeze(bias).reshape(-1) conv =", "in rois: if roi.shape[0] != self.input_hw[0] or roi.shape[1] != self.input_hw[1]: new_roi = cv2.resize(roi,", "name='normalized_image') # read layer info layers = data['layers'] current = input_norm network =", "roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB) new_rois.append(roi) return new_rois def extract_features(self, rois=None, npimg=None, faces=None): probs", "extract_features(self, rois=None, npimg=None, faces=None): probs = [] feats = [] if not rois", "== 'softmax': current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)])) network[name] = current self.network = network", "3), name='image') self.class_names = [str(x[0][0]) for x in classes[0][0]['description'][0][0]] input_norm = tf.subtract(self.input_node, 
self.average_image,", "'SAME' stride = layer[0]['stride'][0][0] kernel, bias = layer[0]['weights'][0][0] # kernel = np.transpose(kernel, (1,", "current self.network = network self.graph = tf.get_default_graph() config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph,", "= tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) self.persistent_sess = tf.Session(graph=self.graph, config=config) self.db = None if custom_db: db_path =", "pool[0], pool[1], 1), strides=(1, stride[0], stride[0], 1), padding='SAME') elif layer_type == 'softmax': current", "= 'SAME' stride = layer[0]['stride'][0][0] kernel, bias = layer[0]['weights'][0][0] # kernel = np.transpose(kernel,", "import tensorflow as tf from scipy.io import loadmat import pickle from deepface.confs.conf import", "return probs, feats def detect(self, npimg, rois=None, faces=None): probs, feats = self.extract_features(npimg=npimg, rois=rois,", "= np.vstack(feats)[:len(rois)] return probs, feats def detect(self, npimg, rois=None, faces=None): probs, feats =", "prop in probs] else: # TODO names = [] for feat in feats:", "current = input_norm network = {} for layer in layers[0]: name = layer[0]['name'][0][0]", "feed_dict={ self.input_node: roi_chunk }) feat = [np.squeeze(x) for x in feat] probs.append(prob) feats.append(feat)", "1), strides=(1, stride[0], stride[0], 1), padding='SAME') elif layer_type == 'softmax': current = tf.nn.softmax(tf.reshape(current,", "[] if len(rois) > 0: new_rois = self.get_new_rois(rois=rois) for roi_chunk in grouper(new_rois, self.batch_size,", "pool = layer[0]['pool'][0][0] current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0], stride[0],", "os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface') filename = 'weight.mat' filepath = os.path.join(dir_path, filename) if not os.path.exists(filepath): raise", "meta info meta = data['meta'] classes = meta['classes'] normalization 
= meta['normalization'] self.average_image =", "meta = data['meta'] classes = meta['classes'] normalization = meta['normalization'] self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1,", "224, 224, 3), dtype=np.uint8) }) def name(self): return FaceRecognizerVGG.NAME def get_new_rois(self, rois): new_rois", "rois=None, faces=None): probs, feats = self.extract_features(npimg=npimg, rois=rois, faces=faces) if self.db is None: names", "[-1, len(self.class_names)])) network[name] = current self.network = network self.graph = tf.get_default_graph() config =", "3), dtype=np.uint8)): prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={ self.input_node: roi_chunk }) feat =", "= layer[0]['stride'][0][0] pool = layer[0]['pool'][0][0] current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1,", "pool[1], 1), strides=(1, stride[0], stride[0], 1), padding='SAME') elif layer_type == 'softmax': current =" ]
[ "u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32',", "u'uptime': 4525784, u'remote_as': 110, u'address_family': {u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6':", "{u'is_enabled': True, u'uptime': 4525781, u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes': 5, u'received_prefixes':", "dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10',", "\"global\": { \"router_id\": \"172.16.31.10\", \"peers\": { \"192.168.0.0\": { \"is_enabled\": true, \"uptime\": 4525781, \"remote_as\":", "from napalm_base import get_network_driver junos_driver = get_network_driver('junos') junos_device = {'username': 'pytraining', 'password': '<PASSWORD>',", "'172.30.179.95'} with junos_driver(**junos_device) as junos: print('-'*60) print junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60)", "\"description\": \"\" }, \"192.168.0.4\": { \"is_enabled\": true, \"uptime\": 4525784, \"remote_as\": 110, \"address_family\": {", "\"remote_as\": 110, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 6, \"accepted_prefixes\": 5, \"received_prefixes\": 5 },", "109, u'is_up': True, u'description': u''}, '192.168.0.4': {u'is_enabled': True, u'uptime': 4525784, u'remote_as': 110, u'address_family':", "{u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}},", "\"is_enabled\": true, \"uptime\": 4525784, \"remote_as\": 110, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 6, \"accepted_prefixes\":", "{u'ipv4': {u'sent_prefixes': 5, 
u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes':", "-1}}, u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up': True, u'description': u''}, '192.168.0.4': {u'is_enabled': True, u'uptime':", "json import dumps from napalm_base import get_network_driver junos_driver = get_network_driver('junos') junos_device = {'username':", "junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781, u'remote_as': 104,", "\"received_prefixes\": -1 } }, \"remote_id\": \"172.16.58.3\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" }", "\"router_id\": \"172.16.31.10\", \"peers\": { \"192.168.0.0\": { \"is_enabled\": true, \"uptime\": 4525781, \"remote_as\": 104, \"address_family\":", "-1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.58.3\", \"local_as\": 109, \"is_up\": true,", "u''}}}} ------------------------------------------------------------ { \"global\": { \"router_id\": \"172.16.31.10\", \"peers\": { \"192.168.0.0\": { \"is_enabled\": true,", "u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as': 109,", "True, u'description': u''}}}} ------------------------------------------------------------ { \"global\": { \"router_id\": \"172.16.31.10\", \"peers\": { \"192.168.0.0\": {", "-1 } }, \"remote_id\": \"172.16.17.32\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" }, \"192.168.0.4\":", "u'description': u''}, '192.168.0.4': {u'is_enabled': True, u'uptime': 4525784, u'remote_as': 110, u'address_family': {u'ipv4': {u'sent_prefixes': 6,", "\"received_prefixes\": -1 } }, \"remote_id\": \"172.16.17.32\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" },", "true, \"description\": \"\" }, 
\"192.168.0.4\": { \"is_enabled\": true, \"uptime\": 4525784, \"remote_as\": 110, \"address_family\":", "napalm_base import get_network_driver junos_driver = get_network_driver('junos') junos_device = {'username': 'pytraining', 'password': '<PASSWORD>', 'hostname':", "\"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.17.32\", \"local_as\":", "5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as':", "as junos: print('-'*60) print junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] '''", "------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781, u'remote_as': 104, u'address_family':", "\"sent_prefixes\": 5, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1,", "'password': '<PASSWORD>', 'hostname': '172.30.179.95'} with junos_driver(**junos_device) as junos: print('-'*60) print junos.get_bgp_neighbors() print('-'*60) print", "5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up':", "\"sent_prefixes\": 6, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1,", "\"is_enabled\": true, \"uptime\": 4525781, \"remote_as\": 104, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 5, \"accepted_prefixes\":", "\"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } },", "5, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, 
u'received_prefixes': -1}}, u'remote_id':", "u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3',", "'192.168.0.4': {u'is_enabled': True, u'uptime': 4525784, u'remote_as': 110, u'address_family': {u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes': 5,", "true, \"uptime\": 4525781, \"remote_as\": 104, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 5, \"accepted_prefixes\": 5,", "u'description': u''}}}} ------------------------------------------------------------ { \"global\": { \"router_id\": \"172.16.31.10\", \"peers\": { \"192.168.0.0\": { \"is_enabled\":", "4525781, u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes':", "\"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.58.3\", \"local_as\": 109, \"is_up\": true, \"description\":", "\"address_family\": { \"ipv4\": { \"sent_prefixes\": 5, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": {", "-1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.17.32\", \"local_as\": 109, \"is_up\": true, \"description\": \"\"", "'hostname': '172.30.179.95'} with junos_driver(**junos_device) as junos: print('-'*60) print junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4)", "dumps from napalm_base import get_network_driver junos_driver = get_network_driver('junos') junos_device = {'username': 'pytraining', 'password':", "110, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 6, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\":", "}, \"192.168.0.4\": { \"is_enabled\": true, \"uptime\": 4525784, \"remote_as\": 110, \"address_family\": { \"ipv4\": {", "5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as':", 
"{u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes':", "u'172.16.58.3', u'local_as': 109, u'is_up': True, u'description': u''}}}} ------------------------------------------------------------ { \"global\": { \"router_id\": \"172.16.31.10\",", "'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'} with junos_driver(**junos_device) as junos: print('-'*60) print junos.get_bgp_neighbors() print('-'*60)", "u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up': True, u'description': u''}, '192.168.0.4': {u'is_enabled': True, u'uptime': 4525784,", "{u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781, u'remote_as': 104, u'address_family': {u'ipv4':", "u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up': True, u'description': u''}, '192.168.0.4':", "109, u'is_up': True, u'description': u''}}}} ------------------------------------------------------------ { \"global\": { \"router_id\": \"172.16.31.10\", \"peers\": {", "110, u'address_family': {u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes':", "u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as': 109,", "5, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\":", "\"uptime\": 4525781, \"remote_as\": 104, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 5, \"accepted_prefixes\": 5, \"received_prefixes\":", "python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781, 
u'remote_as':", "{ \"router_id\": \"172.16.31.10\", \"peers\": { \"192.168.0.0\": { \"is_enabled\": true, \"uptime\": 4525781, \"remote_as\": 104,", "\"192.168.0.4\": { \"is_enabled\": true, \"uptime\": 4525784, \"remote_as\": 110, \"address_family\": { \"ipv4\": { \"sent_prefixes\":", "print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0':", "u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up': True,", "{ \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.17.32\", \"local_as\": 109,", "4525784, \"remote_as\": 110, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 6, \"accepted_prefixes\": 5, \"received_prefixes\": 5", "u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up': True, u'description': u''}}}} ------------------------------------------------------------ { \"global\":", "104, u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes':", "} }, \"remote_id\": \"172.16.58.3\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" } } }", "True, u'description': u''}, '192.168.0.4': {u'is_enabled': True, u'uptime': 4525784, u'remote_as': 110, u'address_family': {u'ipv4': {u'sent_prefixes':", "\"172.16.58.3\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" } } } } ------------------------------------------------------------ 4525930", "get_network_driver junos_driver = get_network_driver('junos') junos_device = {'username': 'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'} with", "u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up': True, 
u'description': u''}}}} ------------------------------------------------------------ { \"global\": { \"router_id\":", "{u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781, u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes': 5,", "\"local_as\": 109, \"is_up\": true, \"description\": \"\" }, \"192.168.0.4\": { \"is_enabled\": true, \"uptime\": 4525784,", "-1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.58.3\", \"local_as\": 109, \"is_up\": true, \"description\": \"\"", "true, \"uptime\": 4525784, \"remote_as\": 110, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 6, \"accepted_prefixes\": 5,", "\"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.58.3\", \"local_as\":", "}, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.17.32\",", "\"172.16.17.32\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" }, \"192.168.0.4\": { \"is_enabled\": true, \"uptime\":", "with junos_driver(**junos_device) as junos: print('-'*60) print junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print", "------------------------------------------------------------ { \"global\": { \"router_id\": \"172.16.31.10\", \"peers\": { \"192.168.0.0\": { \"is_enabled\": true, \"uptime\":", "u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up': True,", "\"\" }, \"192.168.0.4\": { \"is_enabled\": true, \"uptime\": 4525784, \"remote_as\": 110, \"address_family\": { \"ipv4\":", "5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up':", "}, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.58.3\",", "6, 
u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id':", "from json import dumps from napalm_base import get_network_driver junos_driver = get_network_driver('junos') junos_device =", "= {'username': 'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'} with junos_driver(**junos_device) as junos: print('-'*60) print", "-1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.17.32\", \"local_as\": 109, \"is_up\": true,", "import dumps from napalm_base import get_network_driver junos_driver = get_network_driver('junos') junos_device = {'username': 'pytraining',", "-1, u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up': True, u'description': u''}}}} ------------------------------------------------------------ {", "u'is_up': True, u'description': u''}, '192.168.0.4': {u'is_enabled': True, u'uptime': 4525784, u'remote_as': 110, u'address_family': {u'ipv4':", "\"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.17.32\", \"local_as\": 109, \"is_up\":", "{u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up': True, u'description':", "\"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.17.32\", \"local_as\": 109, \"is_up\": true, \"description\":", "print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global':", "True, u'uptime': 4525784, u'remote_as': 110, u'address_family': {u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes': 5},", "\"192.168.0.0\": { \"is_enabled\": true, \"uptime\": 4525781, \"remote_as\": 104, 
\"address_family\": { \"ipv4\": { \"sent_prefixes\":", "{ \"ipv4\": { \"sent_prefixes\": 6, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\":", "109, \"is_up\": true, \"description\": \"\" }, \"192.168.0.4\": { \"is_enabled\": true, \"uptime\": 4525784, \"remote_as\":", "104, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 5, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\":", "}, \"remote_id\": \"172.16.58.3\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" } } } }", "{ \"is_enabled\": true, \"uptime\": 4525781, \"remote_as\": 104, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 5,", "u'172.16.17.32', u'local_as': 109, u'is_up': True, u'description': u''}, '192.168.0.4': {u'is_enabled': True, u'uptime': 4525784, u'remote_as':", "\"remote_id\": \"172.16.58.3\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" } } } } ------------------------------------------------------------", "{u'is_enabled': True, u'uptime': 4525784, u'remote_as': 110, u'address_family': {u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes':", "4525784, u'remote_as': 110, u'address_family': {u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes':", "\"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.58.3\", \"local_as\": 109, \"is_up\":", "u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1,", "-1 } }, \"remote_id\": \"172.16.58.3\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" } }", "\"172.16.31.10\", \"peers\": { \"192.168.0.0\": { \"is_enabled\": true, \"uptime\": 4525781, \"remote_as\": 104, \"address_family\": {", "{u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up': True, u'description':", 
"\"remote_as\": 104, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 5, \"accepted_prefixes\": 5, \"received_prefixes\": 5 },", "indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10', u'peers':", "u''}, '192.168.0.4': {u'is_enabled': True, u'uptime': 4525784, u'remote_as': 110, u'address_family': {u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes':", "import get_network_driver junos_driver = get_network_driver('junos') junos_device = {'username': 'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'}", "junos_driver = get_network_driver('junos') junos_device = {'username': 'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'} with junos_driver(**junos_device)", "{ \"global\": { \"router_id\": \"172.16.31.10\", \"peers\": { \"192.168.0.0\": { \"is_enabled\": true, \"uptime\": 4525781,", "print('-'*60) print junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python", "u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781, u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes':", "junos_driver(**junos_device) as junos: print('-'*60) print junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime']", "u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1,", "u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781, u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes': 5,", "u'accepted_prefixes': -1, 
u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up': True, u'description': u''}}}} ------------------------------------------------------------", "-1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up': True, u'description': u''}}}}", "}, \"remote_id\": \"172.16.17.32\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" }, \"192.168.0.4\": { \"is_enabled\":", "{u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781, u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes':", "junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python junos_get_bgp_neighbors.py ------------------------------------------------------------", "\"remote_id\": \"172.16.17.32\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" }, \"192.168.0.4\": { \"is_enabled\": true,", "True, u'uptime': 4525781, u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes': 5, u'received_prefixes': 5},", "{'username': 'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'} with junos_driver(**junos_device) as junos: print('-'*60) print junos.get_bgp_neighbors()", "junos_device = {'username': 'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'} with junos_driver(**junos_device) as junos: print('-'*60)", "''' # python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime':", "print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': 
{u'is_enabled':", "\"local_as\": 109, \"is_up\": true, \"description\": \"\" } } } } ------------------------------------------------------------ 4525930 '''", "\"ipv4\": { \"sent_prefixes\": 5, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1,", "\"address_family\": { \"ipv4\": { \"sent_prefixes\": 6, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": {", "4525781, \"remote_as\": 104, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 5, \"accepted_prefixes\": 5, \"received_prefixes\": 5", "\"peers\": { \"192.168.0.0\": { \"is_enabled\": true, \"uptime\": 4525781, \"remote_as\": 104, \"address_family\": { \"ipv4\":", "u'uptime': 4525781, u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6':", "junos: print('-'*60) print junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' #", "{ \"sent_prefixes\": 5, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\":", "# python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781,", "\"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1", "} }, \"remote_id\": \"172.16.17.32\", \"local_as\": 109, \"is_up\": true, \"description\": \"\" }, \"192.168.0.4\": {", "\"is_up\": true, \"description\": \"\" }, \"192.168.0.4\": { \"is_enabled\": true, \"uptime\": 4525784, \"remote_as\": 110,", "5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 }", "'<PASSWORD>', 'hostname': '172.30.179.95'} with junos_driver(**junos_device) as 
junos: print('-'*60) print junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(),", "u'local_as': 109, u'is_up': True, u'description': u''}}}} ------------------------------------------------------------ { \"global\": { \"router_id\": \"172.16.31.10\", \"peers\":", "{ \"sent_prefixes\": 6, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\":", "-1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up': True, u'description': u''},", "\"uptime\": 4525784, \"remote_as\": 110, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 6, \"accepted_prefixes\": 5, \"received_prefixes\":", "u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up': True, u'description': u''}, '192.168.0.4': {u'is_enabled': True,", "u'local_as': 109, u'is_up': True, u'description': u''}, '192.168.0.4': {u'is_enabled': True, u'uptime': 4525784, u'remote_as': 110,", "print junos.get_bgp_neighbors() print('-'*60) print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python junos_get_bgp_neighbors.py", "{u'sent_prefixes': 5, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}},", "-1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up': True, u'description': u''}, '192.168.0.4': {u'is_enabled':", "{ \"192.168.0.0\": { \"is_enabled\": true, \"uptime\": 4525781, \"remote_as\": 104, \"address_family\": { \"ipv4\": {", "{ \"is_enabled\": true, \"uptime\": 4525784, \"remote_as\": 110, \"address_family\": { \"ipv4\": { \"sent_prefixes\": 6,", "6, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\":", "u'remote_as': 110, u'address_family': {u'ipv4': 
{u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1,", "{ \"ipv4\": { \"sent_prefixes\": 5, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\":", "\"ipv4\": { \"sent_prefixes\": 6, \"accepted_prefixes\": 5, \"received_prefixes\": 5 }, \"ipv6\": { \"sent_prefixes\": -1,", "get_network_driver('junos') junos_device = {'username': 'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'} with junos_driver(**junos_device) as junos:", "junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True,", "u'address_family': {u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1,", "{ \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, \"remote_id\": \"172.16.58.3\", \"local_as\": 109,", "<filename>junos_get_bgp_neighbors.py from json import dumps from napalm_base import get_network_driver junos_driver = get_network_driver('junos') junos_device", "-1}}, u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up': True, u'description': u''}}}} ------------------------------------------------------------ { \"global\": {", "u'is_up': True, u'description': u''}}}} ------------------------------------------------------------ { \"global\": { \"router_id\": \"172.16.31.10\", \"peers\": { \"192.168.0.0\":", "print dumps(junos.get_bgp_neighbors(), indent=4) print('-'*60) print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime'] ''' # python junos_get_bgp_neighbors.py ------------------------------------------------------------ {u'global': {u'router_id':", "5 }, \"ipv6\": { \"sent_prefixes\": -1, \"accepted_prefixes\": -1, \"received_prefixes\": -1 } }, 
\"remote_id\":", "= get_network_driver('junos') junos_device = {'username': 'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'} with junos_driver(**junos_device) as" ]
[ "s1 + global memory f = lambda e: L.is_global(e) and (L.is_warp(e) or L.does_match(e,", "# Html file (including navigation and sections) class HtmlFile: \"\"\"Html file representing litmus", "basename (instead of default name)') p10.add_argument('-d', '--diro', action='store', default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat)) return p", "InputAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) def get_cmdline_parser(cmds): #", "= produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce table with sections according", "p3.add_argument('-o', '--out', action='store', default=f) p3.add_argument('-d', '--diro', action='store', default='entries') p3.set_defaults(func=partial(mux, sections)) # Two-level p4", "default=f) p5.set_defaults(func=partial(mux, latex)) # Latex 2 p6 = sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', action=InputAction) f", "view the logfile for a test and chip, click on the corresponding number.", "'>\\n' self.items.append(s) self.add_nav_item(heading, level) self.secn += 1 def add_html(self, html): self.items.append(html) def finish(self,", "Process header s += '<tr>\\n' s += ' <th>Scope tree</th>\\n' s += '", "= os.path.basename(log) assert(type(chip) == str) chip_old = chip while True: chip = os.path.splitext(chip)[0]", "& R+S\\\\ \\hline \"\"\") # Scope and mem filters, including table description and", "= ma.get_pos_keys(logs, ks) all_matching += ks if ks: h.new_section(name, 0) s = produce_table(ks,", "0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce flat table", "] return d def get_section_names(): # Parallel the above functions names = [", "ma.Log)) assert(hasattr(args, 'diro')) # Get all the keys if pos: ks = ma.get_pos_keys(logs)", "tests: # Get all tests that match a simple test name (like rwc)", "sections)) # Two-level 
p4 = sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction) f = cmds[3]", "latex2)) # Latex 3 p7 = sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction) f = cmds[6]", "of the incantations') p9.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[8] p9.add_argument('-o',", "<td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td> <td>", "the incantations') p8.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[7] p8.add_argument('-o', '--out',", "L.is_mixed_mem), # Mixed scopes, global memory c(L.is_mixed_scope, L.is_global), # Mixed scopes, shared memory", "== \"__main__\": if len(sys.argv) == 1: sys.argv += ['-h'] cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py')", "lambda e: L.is_general_bc(e)] fs3 = [lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)] fs4", "in logs: e = log.get(k) if e: s += e.pp_cell_link_dir(2, diro) # Produce", "= '<h' + l + '><a id=\"id' + str(self.secn) + '\">' + heading", "L)) for t in short_names: l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if", "'latex', 'latex2', 'latex3', 'incantations', 'incantations-flat', 'incantations-html'] p = get_cmdline_parser(cmds) if cmd not in", "str(e.pos) + '\\\\\\\\' s = '\\n'.join(l) w_str(args.out, s) ### Produce latex tables def", "html p10 = sp.add_parser(cmds[9], description='Produce flat html tables comparing\\ the effectiveness of the", "= ma.get_entry(k, logs) return e.short_name.lower(), str(e.pos) l = list(map(mapper, ks)) l1, l2 =", "= lambda e: L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl(f,", "default=f, help='output file basename (instead of default name)') p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations html", "ks_s) 
all_matching += ks_s if ks_s: h.new_section(name, 0) # Now divide by other", "e: f(e) and g(e) # List of functions that each take a log", "containing raw litmus log e.store_log_dir(diro) else: s += '<td><a href=\"\">---</a></td>\\n' s += '</tr>\\n'", "ks2.sort() assert(len(ks2) == n) for i, k in enumerate(ks2): e = ma.get_entry(k, logs)", "while self.last_level > level: self.nav += sp * self.last_level + '</ul>\\n' self.last_level -=", "str)) short_names.sort() # Table header # '&nbsp;': non-breaking space # '&#x2713;': checkmark prefix", "& \\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\ & none & R", "' + sec + '}' + (' &' * nc) + r'\\\\' +", "filters, table description, filename suffix for sf, cfg, suf in sfs: s =", "= [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 = [lambda e: not", "media=\"screen\"/> <style> ul { padding-top: 10px; } li { padding-top: 5px; } th,", "'w+rw+2w', 's']), ('prop heavy', ['sb', 'rwc', 'iriw', 'r']), ('thin air', ['lb']) ] lfs", "__init__(self): self.items = [] self.nav = '<h4>Contents</h4>\\n' self.secn = 0 self.last_level = -1", "k in enumerate(ks1): e = ma.get_entry(k, logs) l.append(e.short_name.lower() + sep + str(e.pos) +", "</td> <td>1</td> <td>2</td> <td>3</td> <td>4</td> <td>5</td> <td>6</td> <td>7</td> <td>8</td> <td>9</td> <td>10</td> <td>11</td> <td>12</td>", "s += '</tr>\\n' # Process rows for k in ks: # Start new", "### Produce flat incantation tables def incantations_flat(args): log = args.input assert(type(log) == str)", "ul { padding-top: 10px; } li { padding-top: 5px; } th, td {", "c = type(inp) is list if not c: inp = [inp] inp =", "= cmds[4] + '.tex' p5.add_argument('-o', '--out', action='store', default=f) p5.set_defaults(func=partial(mux, latex)) # Latex 2", "memory c(L.is_warp, L.is_global), c(L.is_cta, L.is_global), c(L.is_ker, L.is_global), # Simple scopes, shared memory c(L.is_warp,", 
"p1.add_argument('-o', '--out', action='store', default=f) p1.add_argument('-d', '--diro', action='store', default='entries') p1.set_defaults(func=partial(mux, flat)) # Classified p2", "file f_out = out_base + '-' + suf + '.html' w_str(f_out, s) #", "R+S\\\\ \\hline \"\"\") # Scope and mem filters, including table description and filename", "\\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl2(f, logs, n) w_str(args.out, s) #", "log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c}", "(i & 0b0010) >> 1 i4 = (i & 0b0001) f1 = fs1[i1]", "HTML suffix after tables suffix = textwrap.dedent(\"\"\" </div> </div> </body> </html> \"\"\") def", "'COWW']))) s += latex_tbl2(f, logs, n) s += '\\n' # Produce d-cta:s-ker table,", "= textwrap.dedent(r\"\"\" <!DOCTYPE html> <html style=\"background:white;\"> <head> <meta charset=\"UTF-8\"> <title>Evaluating incantations</title> <link rel=\"stylesheet\"", "names = [ 'Different warps, same CTA; global memory', 'Different CTAs, same kernel;", "diro=args.diro) h = HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out) # ------------------------------------------------------------------------------ ### Fill up table", "\\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl(f, logs,", "# combination of incantations) are also ignored def incantations(args): log = args.input assert(type(log)", "Two-level p4 = sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction) f = cmds[3] + '.html'", "f != incantations_flat and f != incantations_html_flat: c = type(inp) is list if", "get_cmdline_parser(cmds): # Parent of all p = argparse.ArgumentParser() # Dummy parent for common", "for i, k in enumerate(ks3): e = ma.get_entry(k, logs) l[i] += str(e.pos) +", "'\\n' for t in tests: # 
Get all tests that match a simple", "p2 = sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction) f = cmds[1] + '.html' p2.add_argument('-o',", "scopes and memory regions; no filtering according to # names def get_section_filters(): def", "'2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc = ['CoWW', 'COWW'] ks =", "</tr> \"\"\") # Scope and mem filters, including table description and filename suffix", "n = len(ks1) l = list() for i, k in enumerate(ks1): e =", "= list(filter(f, l2)) if item: item = itemify(item) assert(type(item) == L) entry =", "= args.input assert(type(log) == str) chip = os.path.basename(log) assert(type(chip) == str) chip_old =", "'' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower() + sep + str(e.pos)", "l = list() for i, k in enumerate(ks1): e = ma.get_entry(k, logs) l.append(e.short_name.lower()", "'--out', action='store', default=f, help='output file basename (instead of default name)') p8.set_defaults(func=partial(mux, incantations)) #", "= out_base + '-' + suf + '.html' w_str(f_out, s) # ------------------------------------------------------------------------------ #######################", "CTA; shared memory', 'Different warps, same CTA; mixed memory', 'Mixed scopes, global memory',", "</td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td>", "the test run. 
</center> <br><br> \"\"\") # HTML suffix after tables suffix =", "get_axiom_patterns() h = HtmlFile() all_matching = [] for name, val in l: ks_s", "header s += r'{\\bf ' + sec + '}' + (' &' *", "ks)) header = sep.join([\"Test\" + sep + \"Freq.\"] * n) + \"\\\\\\\\\\n\" header", "Produce file containing raw litmus log item.store_log_dir(args.diro) else: # ppi_incantations: mem_stress, general_bc, barrier,", "as L from generic import lty, interleave, itemify, dupchk, listify, w_str # ------------------------------------------------------------------------------", "in ks: # Start new row s += '<tr>\\n' le = ma.get_entry(k, logs)", "= sep.join([\"Test\" + sep + \"Freq.\"] * n) + \"\\\\\\\\\\n\" header += '\\midrule\\n'", "+ str(self.secn) + '\">' + heading + '</a></h'\\ + l + '>\\n' self.items.append(s)", "logs) l[i] += str(e.pos) + sep # s2 + global memory f =", "class=\"inner\"> <h1>Evaluating incantations</h1> <br> <center> To view the logfile for a test, click", "#!/usr/bin/env python3 import argparse import os import sys import collections import textwrap from", "description, filename suffix for sf, cfg, suf in sfs: s = prefix s", "same CTA; global memory', 'Different CTAs, same kernel; global memory', 'Different kernels, same", "default=f) p4.add_argument('-d', '--diro', action='store', default='entries') p4.set_defaults(func=partial(mux, two_level)) # Latex p5 = sp.add_parser(cmds[4], parents=[parent])", "incantations</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style> ul { padding-top: 10px; } li", "# Latex 3 p7 = sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction) f = cmds[6] +", "0) ks.sort() s = produce_table(ks, logs) h.add_html(s) h.finish() h.write(args.out) ### Two level classification", "def __init__(self): self.items = [] self.nav = '<h4>Contents</h4>\\n' self.secn = 0 self.last_level =", "l.append(args.out) chk(not dupchk(l), 'duplicate files given') # Read ordinary logs 
(if we do", "<td>3</td> <td>4</td> <td>5</td> <td>6</td> <td>7</td> <td>8</td> <td>9</td> <td>10</td> <td>11</td> <td>12</td> <td>13</td> <td>14</td> <td>15</td>", "prefix and suffix name = os.path.basename(log.fn) idx = name.find('.') if idx != -1:", "global memory', 'Different kernels, same device; global memory', 'Different warps, same CTA; shared", "s # Filtering according to scopes and memory regions; no filtering according to", "e.store_log_dir(diro) else: s += '<td><a href=\"\">---</a></td>\\n' s += '</tr>\\n' s += '</table>\\n' return", "To view the logfile for a test and chip, click on the corresponding", "axioms def classified(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro'))", "# Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g g", "+= latex_tbl(f, logs, n) s += '\\n' # Produce d-cta:s-ker table, global memory", "val in l: ks_s = ma.get_matching_keys(val, logs) if pos: ks_s = ma.get_pos_keys(logs, ks_s)", "global memory', 's2-global') ] # Column filters fs1 = [lambda e: not L.is_mem_stress(e),", "L)) if (len(l2) == 0): continue s += t for i in range(0,", "= ['CoWW', 'COWW'] ks = ma.get_matching_keys(l, logs) # Names + s1 + global", "# Parent of all p = argparse.ArgumentParser() # Dummy parent for common options", "incantation tables def incantations_html_flat(args): log = args.input assert(type(log) == str) assert(hasattr(args, 'diro')) chip", "0) ks_s.sort() filters = get_section_filters() names = get_section_names() for f, name in zip(filters,", "and the incantations used for the test run. 
</center> <br><br> \"\"\") # HTML", "= '<h4>Contents</h4>\\n' self.secn = 0 self.last_level = -1 def add_nav_item(self, link, level): sp", "\"\"\"Html file representing litmus test results\"\"\" sp = ' ' # HTML prefix", "f.write(self.s) f.close() # ------------------------------------------------------------------------------ ### Used by all HTML file producers # ks:", "l + '>\\n' self.items.append(s) self.add_nav_item(heading, level) self.secn += 1 def add_html(self, html): self.items.append(html)", "f = lambda e: L.is_global(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e,", "def flat(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) #", "log.get_all() assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header #", "test run. </center> <br><br> \"\"\") # HTML suffix after tables suffix = textwrap.dedent(\"\"\"", "file containing raw litmus log e.store_log_dir(diro) else: s += '<td><a href=\"\">---</a></td>\\n' s +=", "Incantations:} & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\ & none & R & S", "of elements on line def fill_up(l, sep, end, nl): n = len(l) s", "+ [self.suffix] self.s = ''.join(l) def write(self, fn): assert(self.s) f = open(fn, 'w')", "default name)') p10.add_argument('-d', '--diro', action='store', default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat)) return p if __name__ ==", "# Incantations p8 = sp.add_parser(cmds[7], description='Produce tables comparing the\\ effectiveness of the incantations')", "with all tests def flat(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log))", "if ks: h.new_section('Other', 0) ks.sort() s = produce_table(ks, logs) h.add_html(s) h.finish() h.write(args.out) ###", "### Produce flat incantation tables def incantations_html_flat(args): log = args.input assert(type(log) == str)", "help='log (text or pickle)') f = 
cmds[7] p8.add_argument('-o', '--out', action='store', default=f, help='output file", "= ma.get_matching_keys(l, logs) # Names + s1 + global memory f = lambda", "objects (only logs which have the key are included in the # table)", "[lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 = [lambda e: not L.is_general_bc(e),", "not c: inp = [inp] inp = ma.get_logs(inp, lh=ma.Log) if not c: inp", "fs2[i2] f3 = fs3[i3] f4 = fs4[i4] f = lambda e: f1(e) and", "other sections filters = get_section_filters() names = get_section_names() for f, name in zip(filters,", "ks = list(ks) if ks: h.new_section('Other', 0) ks.sort() s = produce_table(ks, logs) h.add_html(s)", "= fs2[i2] f3 = fs3[i3] f4 = fs4[i4] f = lambda e: f1(e)", "'Different CTAs, same kernel; global memory', 'Different kernels, same device; global memory', 'Different", "= str(level+2) s = '<h' + l + '><a id=\"id' + str(self.secn) +", "s += '<td><a href=\"\">---</a></td>\\n' s += '</tr>\\n' s += '</table>\\n' return s #", "------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_flat(args): log = args.input assert(type(log) ==", "(e.g. 
for a certain # combination of incantations) are also ignored def incantations(args):", "== 1) log = log[0] out_base = args.out assert(out_base) les = log.get_all() assert(lty(les,", "given') # Read ordinary logs (if we do not want to read an", "= inp f(args) ############### # Subcommands # ############### ### Produce table with sections", "+ '>\\n' self.items.append(s) self.add_nav_item(heading, level) self.secn += 1 def add_html(self, html): self.items.append(html) def", "</td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\")", "= args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) s = '' h", "= log[0] out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L)) # Table", "c(f, g): return lambda e: f(e) and g(e) # List of functions that", "<td>6</td> <td>7</td> <td>8</td> <td>9</td> <td>10</td> <td>11</td> <td>12</td> <td>13</td> <td>14</td> <td>15</td> <td>16</td> </tr> <tr>", "<td> </td><td> </td><td> </td><td> </td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr>", "= args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l = get_axiom_patterns() h = HtmlFile() all_matching", "latex_tbl2(f, logs, n) w_str(args.out, s) # ------------------------------------------------------------------------------ ### Produce incantations tables # All", "HTML prefix before tables prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html> <head> <meta charset=\"UTF-8\">", "default='entries') p2.set_defaults(func=partial(mux, classified)) # Sections p3 = sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction) f", "memory c(L.is_warp, L.is_mixed_mem), # Mixed scopes, global 
memory c(L.is_mixed_scope, L.is_global), # Mixed scopes,", "short_names.sort() # Table header # '&nbsp;': non-breaking space # '&#x2713;': checkmark prefix =", "self.close_nav(level) self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">' + link", "} table { border-top: none; } </style> </head> <body> <div class=\"outer\" style=\"width: 100%;\">", "get_cmdline_parser(cmds) if cmd not in cmds: p.print_help() sys.exit(2) print('cmd: ' + cmd) pr", "h.add_html(s) h.finish() h.write(args.out) ### Two level classification def two_level(args): pos = args.pos logs", "and f3(e) and f4(e) entry = '-' item = list(filter(f, l2)) if item:", "for i, k in enumerate(ks2): e = ma.get_entry(k, logs) l[i] += str(e.pos) +", "<td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>", "((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl2(f, logs, n)", "action=InputAction) f = cmds[5] + '.tex' p6.add_argument('-o', '--out', action='store', default=f) p6.set_defaults(func=partial(mux, latex2)) #", "action='store', default='entries') p3.set_defaults(func=partial(mux, sections)) # Two-level p4 = sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction)", "= sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction) f = cmds[2] + '.html' p3.add_argument('-o', '--out',", "str(e.pos) l = list(map(mapper, ks)) header = sep.join([\"Test\" + sep + \"Freq.\"] *", "# ------------------------------------------------------------------------------ ####################### # Command line parser # ####################### # Open files and", "chip name chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while True:", "</td><td> </td><td> </td> <td> </td><td> </td><td> 
</td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td>", "L.is_shared(e), 'All threads in different warps, shared memory', 's1-shared'), (lambda e: L.is_cta(e) and", "lh=ma.Log) if not c: inp = inp[0] args.input = inp f(args) ############### #", "filter building blocks (need to be combined to yield a single column #", "+= 1 def add_html(self, html): self.items.append(html) def finish(self, nav=True): self.close_nav(-1) l = [self.prefix]", "' <th>Scope tree</th>\\n' s += ' <th>Memory map</th>\\n' s += ' <th>Name</th>\\n' for", "Fill up table line by line # l: list of items # sep:", "str) # Get incantation log log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) ==", "-1 def add_nav_item(self, link, level): sp = self.sp li = sp * (level", "\"\" while l: chunk = l[:nl] line = sep.join(chunk) s += line +", "'</tr>\\n' s += \"\"\" </table> </div> </div> </body> </html> \"\"\" # Write table", "e = log.get(k) if e: s += e.pp_cell_link_dir(2, diro) # Produce file containing", "level) self.secn += 1 def add_html(self, html): self.items.append(html) def finish(self, nav=True): self.close_nav(-1) l", "Read ordinary logs (if we do not want to read an incantation log)", "'diro')) s = '' h = HtmlFile() filters = get_section_filters() names = get_section_names()", "line def fill_up(l, sep, end, nl): n = len(l) s = \"\" while", "log.get_all() assert(lty(les, L)) # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l", "ks_s = list(ks_s) if ks_s: h.new_section('Other', 0) ks_s.sort() filters = get_section_filters() names =", "not c: inp = inp[0] args.input = inp f(args) ############### # Subcommands #", "# 
------------------------------------------------------------------------------ # Html file (including navigation and sections) class HtmlFile: \"\"\"Html file", "h.write(args.out) ### Produce table with sections according to scopes and memory regions def", "(L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl2(f, logs, n) s += '\\n' # Produce", "chip = os.path.splitext(chip)[0] if chip == chip_old: break chip_old = chip assert(type(chip) ==", "'<tr>\\n' s += '<td>' + t + '</td>' for i in range(0, nc):", "of the incantations') p10.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[9] p10.add_argument('-o',", "assert(type(chip) == str) log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log", "ma.Log)) assert(hasattr(args, 'diro')) s = '' h = HtmlFile() filters = get_section_filters() names", "= sp.add_parser(cmds[8], description='Produce flat tables comparing the\\ effectiveness of the incantations') p9.add_argument('input', action=InputAction,", "'--out', action='store', default=f) p5.set_defaults(func=partial(mux, latex)) # Latex 2 p6 = sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input',", "if pos: ks_s = ma.get_pos_keys(logs, ks_s) all_matching += ks_s if ks_s: h.new_section(name, 0)", "g g g g r r r r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration:", "cfg, 1) s = s.replace('<chip>', chip, 1) l1 = list(filter(sf, les)) assert(lty(l1, L))", "(lambda e: L.is_warp(e) and L.is_shared(e), 'All threads in different warps, shared memory', 's1-shared'),", "class HtmlFile: \"\"\"Html file representing litmus test results\"\"\" sp = ' ' #", "def add_nav_item(self, link, level): sp = self.sp li = sp * (level +", "if ks_s: h.new_section('Other', 0) ks_s.sort() filters = get_section_filters() names = get_section_names() for f,", "= zip(*l) l = interleave(l1, l2, n) s = fill_up(l, sep, '\\\\\\\\\\n', n)", "self.items.append(s) self.add_nav_item(heading, level) 
self.secn += 1 def add_html(self, html): self.items.append(html) def finish(self, nav=True):", "log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] #", "in logs if l.any_key(ks) ] s = '<table>\\n' # Process header s +=", "= log.get(k) if e: s += e.pp_cell_link_dir(2, diro) # Produce file containing raw", "str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif level == self.last_level + 1:", "<config> </td> </tr> </table> </center> <br> <table> <tr> <td> </td> <td>1</td> <td>2</td> <td>3</td>", "Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\ & none & R & S & R+S &", "suffix sfs = [ (lambda e: L.is_warp(e) and L.is_global(e), 'All threads in different", "def latex_tbl(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep = ' & '", "e: L.is_global(e) and (L.is_warp(e) or L.does_match(e, lc)) ks1 = ma.get_filtered_keys(f, logs, ks) ks1.sort()", "str(entry) s += '\\\\\\\\\\n' s += '\\\\hline\\n' s += '\\\\end{tabular}\\n' # Write table", "d def get_section_names(): # Parallel the above functions names = [ 'Different warps,", "(instead of default name)') p8.set_defaults(func=partial(mux, incantations)) # Incantations flat p9 = sp.add_parser(cmds[8], description='Produce", "('SC per location', ['CO', 'Co']), ('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']),", "in different CTAs, global memory', 's2-global') ] # Column filter building blocks (need", "lambda e: L.is_mem_stress(e)] fs2 = [lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)] fs3", "else: ks_s = ma.get_keys(logs) ks_s = set(ks_s) - all_matching ks_s = list(ks_s) if", "'</a></h'\\ + l + '>\\n' self.items.append(s) self.add_nav_item(heading, level) self.secn += 1 def add_html(self,", "number of elements on line def fill_up(l, sep, end, nl): n = len(l)", "file basename (instead of default name)') p10.add_argument('-d', '--diro', action='store', default='entries-inc') 
p10.set_defaults(func=partial(mux, incantations_html_flat)) return", "& ' s = '' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower()", "def latex3(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log) n =", "scopes, global memory c(L.is_mixed_scope, L.is_global), # Mixed scopes, shared memory c(L.is_mixed_scope, L.is_shared), #", "\\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl(f, logs, n) w_str(args.out, s) def", "# ####################### # Open files and parse or unpickle class InputAction(argparse.Action): def __call__(self,", "L.is_general_bc(e), lambda e: L.is_general_bc(e)] fs3 = [lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)]", "assert(lty(l1, L)) for t in short_names: l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L))", "map</th>\\n' s += ' <th>Name</th>\\n' for log in logs: # Remove directory prefix", "'\\midrule\\n' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower(), str(e.pos) l = list(map(mapper,", "args.input l = list(listify(inp)) if hasattr(args, 'out'): l.append(args.out) chk(not dupchk(l), 'duplicate files given')", "incantations_flat and f != incantations_html_flat: c = type(inp) is list if not c:", "= set(ks) - all_matching ks = list(ks) if ks: h.new_section('Other', 0) ks.sort() s", "### Produce incantations tables # All tests that are not explicitely listed under", "per axiom def get_axiom_patterns(): l = [ ('SC per location', ['CO', 'Co']), ('No", "is list if not c: inp = [inp] inp = ma.get_logs(inp, lh=ma.Log) if", "0): continue # Name of test s += t for i in range(0,", "+ '.html' p1.add_argument('-o', '--out', action='store', default=f) p1.add_argument('-d', '--diro', action='store', default='entries') p1.set_defaults(func=partial(mux, flat)) #", "<html> <head> <meta charset=\"UTF-8\"> <title>GPU Litmus Test Results</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/>", "<td style=\"text-align:left\">Chip:</td> <td 
style=\"text-align:left\"> <chip> </td> </tr> <tr style=\"border:none\"> <td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\">", "+ '><a id=\"id' + str(self.secn) + '\">' + heading + '</a></h'\\ + l", "of default name)') p10.add_argument('-d', '--diro', action='store', default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat)) return p if __name__", "CTA; global memory', 'Different CTAs, same kernel; global memory', 'Different kernels, same device;", "<title>GPU Litmus Test Results</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head> <body> <div class=\"outer\">", "latex)) # Latex 2 p6 = sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', action=InputAction) f = cmds[5]", "on the corresponding number. The logfile contains the litmus test code, and the", "rand_threads s += ' & ' + str(entry) s += '\\\\\\\\\\n' s +=", "mem filters, including table description and filename suffix sfs = [ (lambda e:", "inp[0] args.input = inp f(args) ############### # Subcommands # ############### ### Produce table", "'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] # Produce d-warp:s-cta table,", "return names # Get key patterns per axiom def get_axiom_patterns(): l = [", "lc)) ks3 = ma.get_filtered_keys(f, logs, ks) ks3.sort() assert(len(ks3) == n) for i, k", "[lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)] fs4 = [lambda e: not L.is_rand_threads(e),", "15px; } td:nth-child(1) { text-align: left; } tr:nth-child(1), tr:nth-child(5) { border-bottom: 2px solid", "'COWW'] ks = ma.get_matching_keys(l, logs) # Names + s1 + global memory f", "(L.is_cta(e) or L.does_match(e, lc)) ks3 = ma.get_filtered_keys(f, logs, ks) ks3.sort() assert(len(ks3) == n)", "assert(self.s) f = open(fn, 'w') f.write(self.s) f.close() # ------------------------------------------------------------------------------ ### Used by all", "== 1: 
sys.argv += ['-h'] cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds = ['flat', 'classified',", "Get chip name chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while", "suf in sfs: s = prefix s = s.replace('<config>', cfg, 1) s =", "'sections', 'two-level', 'latex', 'latex2', 'latex3', 'incantations', 'incantations-flat', 'incantations-html'] p = get_cmdline_parser(cmds) if cmd", "p = get_cmdline_parser(cmds) if cmd not in cmds: p.print_help() sys.exit(2) print('cmd: ' +", "# ------------------------------------------------------------------------------ ############ # Toplevel # ############ # f: function to be called;", "((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl(f, logs, n)", "= ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] out_base =", "to be combined to yield a single column # filter) fs1 = [lambda", "machinery import LogEntry as L from generic import lty, interleave, itemify, dupchk, listify,", "+ '}' + (' &' * nc) + r'\\\\' + '\\n' for t", "Get incantation log log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log", "l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl2(f, logs, n) s += '\\n'", "# '&#x2713;': checkmark prefix = textwrap.dedent(r\"\"\" <!DOCTYPE html> <html style=\"background:white;\"> <head> <meta charset=\"UTF-8\">", "of a result, it is either because optcheck failed or because there were", "# ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += '<td>' + str(entry) + '</td>'", "+ '\">' + heading + '</a></h'\\ + l + '>\\n' self.items.append(s) self.add_nav_item(heading, level)", "= list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue # Name", "names): ks = ma.get_filtered_keys(f, logs, ks_s) if pos: ks = ma.get_pos_keys(logs, ks) if", "or L.does_match(e, lc)) ks3 = ma.get_filtered_keys(f, logs, ks) ks3.sort() 
assert(len(ks3) == n) for", "common options parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true') # Subparsers sp = p.add_subparsers(help='use", "p8.set_defaults(func=partial(mux, incantations)) # Incantations flat p9 = sp.add_parser(cmds[8], description='Produce flat tables comparing the\\", "f3 = fs3[i3] f4 = fs4[i4] f = lambda e: f1(e) and f2(e)", "= cmds[8] p9.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default name)')", "filename suffix for sf, cfg, suf in sfs: s = prefix s =", "close_nav(self, level): sp = self.sp while self.last_level > level: self.nav += sp *", "= level def close_nav(self, level): sp = self.sp while self.last_level > level: self.nav", "black; } table { border-top: none; } </style> </head> <body> <div class=\"outer\" style=\"width:", "<chip> </td> </tr> <tr style=\"border:none\"> <td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config> </td> </tr> </table>", "Simple scopes, mixed memory c(L.is_warp, L.is_mixed_mem), # Mixed scopes, global memory c(L.is_mixed_scope, L.is_global),", "global memory c(L.is_warp, L.is_global), c(L.is_cta, L.is_global), c(L.is_ker, L.is_global), # Simple scopes, shared memory", "= sp.add_parser(cmds[7], description='Produce tables comparing the\\ effectiveness of the incantations') p8.add_argument('input', action=InputAction, help='log", "\\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\ & none & R & S & R+S", "in lfs.items(): tests.sort() # Section header s += r'{\\bf ' + sec +", "' l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]',", "+ t + '</td>' for i in range(0, nc): i1 = (i &", "else: ks = ma.get_keys(logs) ks = set(ks) - all_matching ks = list(ks) if", "class=\"outer\" style=\"width: 100%;\"> <div class=\"inner\"> <h1>Evaluating incantations</h1> <br> <center> To view the logfile", 
"light', ['2+2w', 'w+rw+2w', 's']), ('prop heavy', ['sb', 'rwc', 'iriw', 'r']), ('thin air', ['lb'])", "</td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\") # Scope and", "+ '\">' + link +\\ '</a></li>\\n' else: assert(False) self.last_level = level def close_nav(self,", "= lambda e: L.is_global(e) and (L.is_warp(e) or L.does_match(e, lc)) ks1 = ma.get_filtered_keys(f, logs,", "== 0): continue # Name of test s += t for i in", "help='output file basename (instead of default name)') p8.set_defaults(func=partial(mux, incantations)) # Incantations flat p9", "table # logs: list of log objects (only logs which have the key", "(only logs which have the key are included in the # table) def", "e: not L.is_barrier(e), lambda e: L.is_barrier(e)] fs4 = [lambda e: not L.is_rand_threads(e), lambda", "\\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra", "list(listify(inp)) if hasattr(args, 'out'): l.append(args.out) chk(not dupchk(l), 'duplicate files given') # Read ordinary", "sys import collections import textwrap from functools import partial import machinery as ma", "Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ]) ] return l # ------------------------------------------------------------------------------ ############", "ks = ma.get_filtered_keys(f, logs, ks_s) if pos: ks = ma.get_pos_keys(logs, ks) if ks:", "of all p = argparse.ArgumentParser() # Dummy parent for common options parent =", "= -1 def add_nav_item(self, link, level): sp = self.sp li = sp *", "assert(type(logs) == ma.Log) sep = ' & ' l = ['CO', 'Co', 'LB[^+]',", "les)) assert(lty(l1, L)) for t in short_names: l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2,", "<td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> 
</td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\") # Scope and mem filters,", "if hasattr(args, 'out'): l.append(args.out) chk(not dupchk(l), 'duplicate files given') # Read ordinary logs", "ks_s: h.new_section('Other', 0) ks_s.sort() filters = get_section_filters() names = get_section_names() for f, name", "f != incantations_html_flat: c = type(inp) is list if not c: inp =", "list(ks_s) if ks_s: h.new_section('Other', 0) ks_s.sort() filters = get_section_filters() names = get_section_names() for", "# Names + s1 + global memory f = lambda e: L.is_global(e) and", "on the chip to run the test. </center> <br> <center> <table style=\"border:none\"> <tr", "list of test names to include in the table # logs: list of", "== str) # Get incantation log log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log)", "R & S & R+S\\\\ \\hline \"\"\") # Scope and mem filters, including", "'</td>' for i in range(0, nc): i1 = (i & 0b1000) >> 3", "ks_s = ma.get_pos_keys(logs, ks_s) all_matching += ks_s if ks_s: h.new_section(name, 0) # Now", "self.sp while self.last_level > level: self.nav += sp * self.last_level + '</ul>\\n' self.last_level", "litmus test code, and the incantations used for the test run. 
</center> <br><br>", "global memory', 'Different CTAs, same kernel; global memory', 'Different kernels, same device; global", "p6.set_defaults(func=partial(mux, latex2)) # Latex 3 p7 = sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction) f =", "= s.replace('<config>', cfg, 1) s = s.replace('<chip>', chip, 1) l1 = list(filter(sf, les))", "p7 = sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction) f = cmds[6] + '.tex' p7.add_argument('-o', '--out',", "latex_tbl(f, logs, n) w_str(args.out, s) def latex2(args): pos = args.pos logs = args.input", "\\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g g g r r r r g g g", "# ############### ### Produce table with sections according to axioms def classified(args): pos", "Configuration: <config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical Incantations:} &", "s = '\\n'.join(l) w_str(args.out, s) ### Produce latex tables def latex3(args): pos =", "args.input assert(type(log) == str) assert(hasattr(args, 'diro')) chip = os.path.basename(log) assert(type(chip) == str) chip_old", "<td>14</td> <td>15</td> <td>16</td> </tr> <tr> <td>memory&nbsp;stress</td> <td> </td><td> </td><td> </td><td> </td> <td> </td><td>", "& \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\ &", "{ padding-top: 10px; } li { padding-top: 5px; } th, td { text-align:", "Incantations html p10 = sp.add_parser(cmds[9], description='Produce flat html tables comparing\\ the effectiveness of", "tables comparing\\ the effectiveness of the incantations') p10.add_argument('input', action=InputAction, help='log (text or pickle)')", "s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce flat table with", "level): assert(0 <= level <= 2) l 
= str(level+2) s = '<h' +", "right; padding: 5px; padding-right: 15px; padding-left: 15px; } td:nth-child(1) { text-align: left; }", "h.add_html(s) all_matching = set(all_matching) if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs)", "entries (e.g. for a certain # combination of incantations) are also ignored def", "<td>8</td> <td>9</td> <td>10</td> <td>11</td> <td>12</td> <td>13</td> <td>14</td> <td>15</td> <td>16</td> </tr> <tr> <td>memory&nbsp;stress</td> <td>", "'<ul>\\n' self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">' + link", "if not c: inp = [inp] inp = ma.get_logs(inp, lh=ma.Log) if not c:", "Scope and mem filters, table description, filename suffix for sf, cfg, suf in", "sys.argv += ['-h'] cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds = ['flat', 'classified', 'sections', 'two-level',", "args.input = inp f(args) ############### # Subcommands # ############### ### Produce table with", "assert(type(logs) == ma.Log) n = 4 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]',", "ks = set(ks) - all_matching ks = list(ks) if ks: h.new_section('Other', 0) ks.sort()", "memory f = lambda e: L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l))) s", "line + ((nl - len(chunk)) * sep) + end l = l[nl:] return", "+ suf + '.html' w_str(f_out, s) # ------------------------------------------------------------------------------ ####################### # Command line parser", "href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif level == self.last_level", "cmds[5] + '.tex' p6.add_argument('-o', '--out', action='store', default=f) p6.set_defaults(func=partial(mux, latex2)) # Latex 3 p7", "<br> <table> <tr> <td> </td> <td>1</td> <td>2</td> <td>3</td> <td>4</td> <td>5</td> <td>6</td> <td>7</td> <td>8</td>", "list of items # sep: separator # end: end of line # n:", "The logfile also contains the litmus test code. 
When a dash appears instead", "+ shared memory f = lambda e: L.is_shared(e) and (L.is_warp(e) or L.does_match(e, lc))", "-h for further help', title= 'subcommands') # Flat p1 = sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input',", "16 # Scope and mem filters, table description, filename suffix for sf, cfg,", "set(all_matching) if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) ks = set(ks)", "nargs='+', action=InputAction) f = cmds[1] + '.html' p2.add_argument('-o', '--out', action='store', default=f) p2.add_argument('-d', '--diro',", "include in the table # logs: list of log objects (only logs which", "sections(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) s =", "L.is_global(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s =", "[] self.nav = '<h4>Contents</h4>\\n' self.secn = 0 self.last_level = -1 def add_nav_item(self, link,", "= args.input assert(type(log) == str) assert(hasattr(args, 'diro')) chip = os.path.basename(log) assert(type(chip) == str)", "# Dummy parent for common options parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true') #", "the # table) def produce_table(ks, logs, diro='entries'): logs = [ l for l", "+ str(entry) + '</td>' s += '</tr>\\n' s += \"\"\" </table> </div> </div>", "navigation and sections) class HtmlFile: \"\"\"Html file representing litmus test results\"\"\" sp =", "p if __name__ == \"__main__\": if len(sys.argv) == 1: sys.argv += ['-h'] cmd", "produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce flat table with all tests", "</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td>", "[ ('uniproc', ['corr', 'corw', 'cowr', 'coww']), 
('observation', ['mp', 'isa2', 'wrc']), ('prop light', ['2+2w',", "# Process rows for k in ks: # Start new row s +=", "short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85}", "p4.add_argument('-d', '--diro', action='store', default='entries') p4.set_defaults(func=partial(mux, two_level)) # Latex p5 = sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input',", "= [] for name, val in l: ks = ma.get_matching_keys(val, logs) if pos:", "= sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction) f = cmds[0] + '.html' p1.add_argument('-o', '--out',", "f = open(fn, 'w') f.write(self.s) f.close() # ------------------------------------------------------------------------------ ### Used by all HTML", "global memory', 'Different warps, same CTA; shared memory', 'Different warps, same CTA; mixed", "itemify(item) assert(type(item) == L) entry = item.pos s += item.pp_cell_link_dir(2, args.diro) # Produce", "'(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ]) ] return l #", "[lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Line filters", "[inp] inp = ma.get_logs(inp, lh=ma.Log) if not c: inp = inp[0] args.input =", "p4.add_argument('input', nargs='+', action=InputAction) f = cmds[3] + '.html' p4.add_argument('-o', '--out', action='store', default=f) p4.add_argument('-d',", "os.path.basename(log.fn) idx = name.find('.') if idx != -1: name = name[:idx] s +=", "action=InputAction) f = cmds[2] + '.html' p3.add_argument('-o', '--out', action='store', default=f) p3.add_argument('-d', '--diro', action='store',", "of the incantations') p8.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[7] p8.add_argument('-o',", "left; } tr:nth-child(1), tr:nth-child(5) { border-bottom: 2px solid black; } table { border-top:", "incantations)) # 
Incantations flat p9 = sp.add_parser(cmds[8], description='Produce flat tables comparing the\\ effectiveness", "all_matching = set(all_matching) if pos: ks_s = ma.get_pos_keys(logs) else: ks_s = ma.get_keys(logs) ks_s", "'--out', action='store', default=f) p1.add_argument('-d', '--diro', action='store', default='entries') p1.set_defaults(func=partial(mux, flat)) # Classified p2 =", "str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif level < self.last_level: self.close_nav(level) self.nav", "ks: list of test names to include in the table # logs: list", "s.replace('<chip>', chip, 1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for sec, tests in", "all_matching += ks_s if ks_s: h.new_section(name, 0) # Now divide by other sections", "ks) if ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) # Rest", "filename, default is the command name out_base = args.out assert(out_base) les = log.get_all()", "& 0b0100) >> 2 i3 = (i & 0b0010) >> 1 i4 =", "and L.is_global(e), 'All threads in different warps, global memory', 's1-global'), (lambda e: L.is_warp(e)", "= list(map(mapper, ks)) l1, l2 = zip(*l) l = interleave(l1, l2, n) s", "'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] # Produce d-warp:s-cta table, global", "for the test run. 
</center> <br><br> \"\"\") # HTML suffix after tables suffix", "sep.join([\"Test\" + sep + \"Freq.\"] * n) + \"\\\\\\\\\\n\" header += '\\midrule\\n' s", "tables suffix = textwrap.dedent(\"\"\" </div> </div> </body> </html> \"\"\") def __init__(self): self.items =", "take a log entry d = [ # Simple scopes, global memory c(L.is_warp,", "= ma.get_filtered_keys(f, logs) if pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 0)", "s += ' & ' + str(entry) s += '\\\\\\\\\\n' s += '\\\\end{tabular}\\n'", "barrier, rand_threads s += ' & ' + str(entry) s += '\\\\\\\\\\n' s", "+= e.pp_cell_link_dir(2, diro) # Produce file containing raw litmus log e.store_log_dir(diro) else: s", "+ '.html' p3.add_argument('-o', '--out', action='store', default=f) p3.add_argument('-d', '--diro', action='store', default='entries') p3.set_defaults(func=partial(mux, sections)) #", "= cmds[7] p8.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default name)')", "+ sep) # s1 + shared memory f = lambda e: L.is_shared(e) and", "self.items = [] self.nav = '<h4>Contents</h4>\\n' self.secn = 0 self.last_level = -1 def", "all_matching += ks if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s)", "code, and the incantations used for the test run. 
</center> <br><br> \"\"\") #", "- all_matching ks_s = list(ks_s) if ks_s: h.new_section('Other', 0) ks_s.sort() filters = get_section_filters()", "log) if f != incantations and f != incantations_flat and f != incantations_html_flat:", "'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc = ['CoWW', 'COWW'] ks = ma.get_matching_keys(l, logs)", "'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] # Produce", "self.items.append(html) def finish(self, nav=True): self.close_nav(-1) l = [self.prefix] if nav: l += [self.nav]", "f = lambda e: L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l))) s +=", "s = produce_table(ks, logs) h.add_html(s) h.finish() h.write(args.out) ### Two level classification def two_level(args):", "= list(filter(sf, les)) assert(lty(l1, L)) for sec, tests in lfs.items(): tests.sort() # Section", "flat incantation tables def incantations_html_flat(args): log = args.input assert(type(log) == str) assert(hasattr(args, 'diro'))", "s += e.pp_cell_link_dir(2, diro) # Produce file containing raw litmus log e.store_log_dir(diro) else:", "str) # Get chip name chip = os.path.basename(log) assert(type(chip) == str) chip_old =", "sp * (level + 1) ul = sp * (self.last_level + 1) if", "sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction) f = cmds[0] + '.html' p1.add_argument('-o', '--out', action='store',", "log log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0]", "sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', action=InputAction) f = cmds[5] + '.tex' p6.add_argument('-o', '--out', action='store', default=f)", "\\hline & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\", "\\ ((L.is_warp(e) and L.does_match(e, l)) or 
(L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl2(f, logs,", "### Produce latex tables def latex3(args): pos = args.pos logs = args.input assert(type(logs)", "warps, same CTA; mixed memory', 'Mixed scopes, global memory', 'Mixed scopes, shared memory',", "style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\"> <chip> </td> </tr> <tr style=\"border:none\"> <td style=\"text-align:left\">Config:</td> <td", "f1(e) and f2(e) and f3(e) and f4(e) entry = '-' item = list(filter(f,", "</td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td>", "name out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L)) short_names = log.get_names()", "L.is_mixed_mem) ] return d def get_section_names(): # Parallel the above functions names =", "import lty, interleave, itemify, dupchk, listify, w_str # ------------------------------------------------------------------------------ # Html file (including", "'diro')) # Get all the keys if pos: ks = ma.get_pos_keys(logs) else: ks", "0b0100) >> 2 i3 = (i & 0b0010) >> 1 i4 = (i", "the incantations used for the test run. 
</center> <br><br> \"\"\") # HTML suffix", "logs, n) w_str(args.out, s) def latex2(args): pos = args.pos logs = args.input assert(type(logs)", "level <= 2) l = str(level+2) s = '<h' + l + '><a", "enumerate(ks3): e = ma.get_entry(k, logs) l[i] += str(e.pos) + '\\\\\\\\' s = '\\n'.join(l)", "border-top: none; } </style> </head> <body> <div class=\"outer\" style=\"width: 100%;\"> <div class=\"inner\"> <h1>Evaluating", "1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) # Rest all_matching = set(all_matching) if", "+\\ '</a></li>\\n' else: assert(False) self.last_level = level def close_nav(self, level): sp = self.sp", "p4.add_argument('-o', '--out', action='store', default=f) p4.add_argument('-d', '--diro', action='store', default='entries') p4.set_defaults(func=partial(mux, two_level)) # Latex p5", "th, td { text-align: right; padding: 5px; padding-right: 15px; padding-left: 15px; } td:nth-child(1)", "= header + fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s def", "{ border-top: none; } </style> </head> <body> <div class=\"outer\" style=\"width: 100%;\"> <div class=\"inner\">", "to read an incantation log) if f != incantations and f != incantations_flat", "= cmds[9] p10.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default name)')", "# Parallel the above functions names = [ 'Different warps, same CTA; global", "test s += t for i in range(0, nc): i1 = (i &", "tests.sort() # Section header s += r'{\\bf ' + sec + '}' +", "suffix for sf, cfg, suf in sfs: s = prefix s = s.replace('<config>',", "help='log (text or pickle)') f = cmds[8] p9.add_argument('-o', '--out', action='store', default=f, help='output file", "default=f) p2.add_argument('-d', '--diro', action='store', default='entries') p2.set_defaults(func=partial(mux, classified)) # Sections p3 = sp.add_parser(cmds[2], parents=[parent])", "s += '<tr>\\n' s += '<td>' + t + '</td>' for i in", "p7.add_argument('input', action=InputAction) f = 
cmds[6] + '.tex' p7.add_argument('-o', '--out', action='store', default=f) p7.set_defaults(func=partial(mux, latex3))", "out_base + '-' + suf + '.html' w_str(f_out, s) # ------------------------------------------------------------------------------ ####################### #", "itemify(item) assert(type(item) == L) entry = item.pos # ppi_incantations: mem_stress, general_bc, barrier, rand_threads", "s += '<tr>\\n' s += ' <th>Scope tree</th>\\n' s += ' <th>Memory map</th>\\n'", "= ma.get_filtered_keys(f, logs, ks_s) if pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name,", "sf, cfg, suf in sfs: s = prefix s = s.replace('<config>', cfg, 1)", "ma.get_matching_keys(val, logs) if pos: ks = ma.get_pos_keys(logs, ks) all_matching += ks if ks:", "prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g g g r r", "<link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style> ul { padding-top: 10px; } li {", "list if not c: inp = [inp] inp = ma.get_logs(inp, lh=ma.Log) if not", "textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html> <head> <meta charset=\"UTF-8\"> <title>GPU Litmus Test Results</title> <link rel=\"stylesheet\"", "are included in the # table) def produce_table(ks, logs, diro='entries'): logs = [", "or pickle)') f = cmds[8] p9.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead", "((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl2(f, logs, n) w_str(args.out, s) # ------------------------------------------------------------------------------", "(i & 0b0100) >> 2 i3 = (i & 0b0010) >> 1 i4", "for k in ks: # Start new row s += '<tr>\\n' le =", "<td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td>", "<td> </td><td> 
</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td>", "</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td> <td> </td><td>", "' & ' + str(entry) s += '\\\\\\\\\\n' s += '\\\\end{tabular}\\n' # Write", "latex_tbl2(f, logs, n) s += '\\n' # Produce d-cta:s-ker table, global memory f", "+ str(entry) s += '\\\\\\\\\\n' s += '\\\\end{tabular}\\n' # Write table to file", "all tests that match a simple test name (like rwc) l2 = list(filter(partial(L.simple_match,", "def produce_table(ks, logs, diro='entries'): logs = [ l for l in logs if", "+ '</ul>\\n' self.last_level -= 1 def new_section(self, heading, level): assert(0 <= level <=", "shared memory f = lambda e: L.is_shared(e) and \\ ((L.is_warp(e) and L.does_match(e, l))", "memory f = lambda e: L.is_shared(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or", "+= str(e.pos) + sep # s2 + global memory f = lambda e:", "= chip while True: chip = os.path.splitext(chip)[0] if chip == chip_old: break chip_old", "lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] out_base = args.out assert(out_base)", "<td>13</td> <td>14</td> <td>15</td> <td>16</td> </tr> <tr> <td>memory&nbsp;stress</td> <td> </td><td> </td><td> </td><td> </td> <td>", "str(entry) + '</td>' s += '</tr>\\n' s += \"\"\" </table> </div> </div> </body>", "<table> <tr> <td> </td> <td>1</td> <td>2</td> <td>3</td> <td>4</td> <td>5</td> <td>6</td> <td>7</td> <td>8</td> <td>9</td>", "machinery as ma from machinery import ErrMsg, chk, bail from machinery import LogEntry", "while True: chip = os.path.splitext(chip)[0] if chip == chip_old: break chip_old = chip", "nc = 16 # Line filters lfs = [ ('uniproc', ['corr', 'corw', 
'cowr',", "Html file (including navigation and sections) class HtmlFile: \"\"\"Html file representing litmus test", "memory', 'Mixed scopes, mixed memory' ] return names # Get key patterns per", "def latex(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log) n =", "L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 = [lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)]", "argparse import os import sys import collections import textwrap from functools import partial", "shared memory c(L.is_mixed_scope, L.is_shared), # Mixed scopes, mixed memory c(L.is_mixed_scope, L.is_mixed_mem) ] return", "r r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: <config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical Incantations:} &", "ma.get_logs(inp, lh=ma.Log) if not c: inp = inp[0] args.input = inp f(args) ###############", "s += '<tr>\\n' le = ma.get_entry(k, logs) s += le.pp_prefix(2) for log in", "= ma.get_keys(logs) ks_s = set(ks_s) - all_matching ks_s = list(ks_s) if ks_s: h.new_section('Other',", "under 'line filters' in this file # are ignored; non-existing tests and non-existing", "e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Scope and mem", "os.path.basename(log) assert(type(chip) == str) chip_old = chip while True: chip = os.path.splitext(chip)[0] if", "+ '<ul>\\n' self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">' +", "& 0b0001) f1 = fs1[i1] f2 = fs2[i2] f3 = fs3[i3] f4 =", "s += t for i in range(0, nc): i1 = (i & 0b1000)", "# Two-level p4 = sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction) f = cmds[3] +", "p4.set_defaults(func=partial(mux, two_level)) # Latex p5 = sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', action=InputAction) f = cmds[4]", "# HTML suffix after tables suffix = textwrap.dedent(\"\"\" </div> </div> </body> </html> \"\"\")", "l + '><a id=\"id' + str(self.secn) + '\">' + heading + 
'</a></h'\\ +", "0 self.last_level = -1 def add_nav_item(self, link, level): sp = self.sp li =", "level == self.last_level: self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">'", "for i in range(0, nc): i1 = (i & 0b1000) >> 3 i2", "'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ]) ] return l", "# ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += ' & ' + str(entry)", "+ '<li><a href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n' else: assert(False)", "cfg, suf in sfs: s = prefix s = s.replace('<config>', cfg, 1) s", "= prefix s = s.replace('<config>', cfg, 1) s = s.replace('<chip>', chip, 1) l1", "ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro)", "os.path.splitext(chip)[0] if chip == chip_old: break chip_old = chip assert(type(chip) == str) log", "ma.get_pos_keys(logs, ks) all_matching += ks if ks: h.new_section(name, 0) s = produce_table(ks, logs,", "'\\\\\\\\' s = '\\n'.join(l) w_str(args.out, s) ### Produce latex tables def latex3(args): pos", "= list(ks) if ks: h.new_section('Other', 0) ks.sort() s = produce_table(ks, logs) h.add_html(s) h.finish()", "Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation", "Parent of all p = argparse.ArgumentParser() # Dummy parent for common options parent", "= args.input assert(type(logs) == ma.Log) n = 4 l = ['CO', 'Co', 'LB[^+]',", "logs, n): ks = ma.get_filtered_keys(f, logs) sep = ' & ' s =", "and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl(f,", "f1 = fs1[i1] f2 = fs2[i2] f3 = fs3[i3] f4 = fs4[i4] f", "get_section_names() for f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs, ks_s) if", "or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl2(f, logs, n) s += '\\n' #", 
"style=\"text-align:left\"> <config> </td> </tr> </table> </center> <br> <table> <tr> <td> </td> <td>1</td> <td>2</td>", "('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [ 'SB',", "sep = ' & ' s = '\\midrule\\n' def mapper(k): e = ma.get_entry(k,", "warps, shared memory', 's1-shared'), (lambda e: L.is_cta(e) and L.is_global(e), 'All threads in different", "logs = args.input assert(type(logs) == ma.Log) n = 8 l = ['CO', 'Co',", "]) ] return l # ------------------------------------------------------------------------------ ############ # Toplevel # ############ # f:", "= produce_table(ks, logs, diro=args.diro) h.add_html(s) all_matching = set(all_matching) if pos: ks = ma.get_pos_keys(logs)", "threads in different CTAs, global memory', 's2-global') ] # Column filters fs1 =", "test. </center> <br> <center> <table style=\"border:none\"> <tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\"> <chip>", "comparing the\\ effectiveness of the incantations') p8.add_argument('input', action=InputAction, help='log (text or pickle)') f", "'--out', action='store', default=f) p6.set_defaults(func=partial(mux, latex2)) # Latex 3 p7 = sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input',", "p8.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default name)') p8.set_defaults(func=partial(mux, incantations))", "'</table>\\n' return s # Filtering according to scopes and memory regions; no filtering", "logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) # Get all the keys if", "R & S & R+S & none & R & S & R+S", "'--out', action='store', default=f) p7.set_defaults(func=partial(mux, latex3)) # Incantations p8 = sp.add_parser(cmds[7], description='Produce tables comparing", "# end: end of line # n: number of elements on line def", "'\\\\\\\\\\n' s += 
'\\\\end{tabular}\\n' # Write table to file f_out = out_base +", "s += '\\n' # Produce d-warp:s-cta table, shared memory f = lambda e:", "out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names,", "logs which have the key are included in the # table) def produce_table(ks,", "'<td>' + t + '</td>' for i in range(0, nc): i1 = (i", "s += latex_tbl2(f, logs, n) w_str(args.out, s) # ------------------------------------------------------------------------------ ### Produce incantations tables", "sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction) f = cmds[2] + '.html' p3.add_argument('-o', '--out', action='store',", "end: end of line # n: number of elements on line def fill_up(l,", "the corresponding number. The logfile also contains the litmus test code. When a", "'s1-shared'), (lambda e: L.is_cta(e) and L.is_global(e), 'All threads in different CTAs, global memory',", "lambda e: L.is_rand_threads(e)] nc = 16 # Scope and mem filters, table description,", "+= [self.nav] l += self.items + [self.suffix] self.s = ''.join(l) def write(self, fn):", "+ \"\\\\\\\\\\n\" header += '\\midrule\\n' s = header + fill_up(l, sep, '\\\\\\\\\\n', n)", "+= ks_s if ks_s: h.new_section(name, 0) # Now divide by other sections filters", "the keys if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) s =", "sep) # s1 + shared memory f = lambda e: L.is_shared(e) and (L.is_warp(e)", "e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Line filters lfs", "in the # table) def produce_table(ks, logs, diro='entries'): logs = [ l for", "self.nav += sp * self.last_level + '</ul>\\n' self.last_level -= 1 def new_section(self, heading,", "assert(len(log) == 1) log = log[0] # Prefix of output filename, default is", "incantations and f != incantations_flat and f != incantations_html_flat: c = type(inp) is", "li + '<li><a href=\"#id' + str(self.secn) + '\">' + 
link +\\ '</a></li>\\n' else:", "t + '</td>' for i in range(0, nc): i1 = (i & 0b1000)", "= ' & ' s = '' def mapper(k): e = ma.get_entry(k, logs)", "Simple scopes, global memory c(L.is_warp, L.is_global), c(L.is_cta, L.is_global), c(L.is_ker, L.is_global), # Simple scopes,", "action='store', default=f) p3.add_argument('-d', '--diro', action='store', default='entries') p3.set_defaults(func=partial(mux, sections)) # Two-level p4 = sp.add_parser(cmds[3],", "+ link +\\ '</a></li>\\n' elif level == self.last_level + 1: self.nav += ul", "s += '\\n' # Produce d-cta:s-ker table, global memory f = lambda e:", "if len(sys.argv) == 1: sys.argv += ['-h'] cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds =", "= os.path.basename(log.fn) idx = name.find('.') if idx != -1: name = name[:idx] s", "elements on line def fill_up(l, sep, end, nl): n = len(l) s =", "c: inp = inp[0] args.input = inp f(args) ############### # Subcommands # ###############", "['CoWW', 'COWW']))) s += latex_tbl2(f, logs, n) s += '\\n' # Produce d-cta:s-ker", "1 def new_section(self, heading, level): assert(0 <= level <= 2) l = str(level+2)", "latex(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log) n = 4", "= p.add_subparsers(help='use <subcommand> -h for further help', title= 'subcommands') # Flat p1 =", "[lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Scope and", "'--diro', action='store', default='entries') p1.set_defaults(func=partial(mux, flat)) # Classified p2 = sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+',", "not in cmds: p.print_help() sys.exit(2) print('cmd: ' + cmd) pr = p.parse_args() pr.func(pr)", "<center> <table style=\"border:none\"> <tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\"> <chip> </td> </tr> <tr", "get_axiom_patterns(): l = [ ('SC per location', ['CO', 'Co']), ('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']),", "<subcommand> -h for further help', title= 
'subcommands') # Flat p1 = sp.add_parser(cmds[0], parents=[parent])", "filename suffix sfs = [ (lambda e: L.is_warp(e) and L.is_global(e), 'All threads in", "</td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td>", "e: L.is_mem_stress(e)] fs2 = [lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)] fs3 =", "nav=True): self.close_nav(-1) l = [self.prefix] if nav: l += [self.nav] l += self.items", "</tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td> </td><td>", "</div> </body> </html> \"\"\") def __init__(self): self.items = [] self.nav = '<h4>Contents</h4>\\n' self.secn", "<h1>Evaluating incantations</h1> <br> <center> To view the logfile for a test, click on", "failed or because there were insufficient resources on the chip to run the", "action='store', default=f) p5.set_defaults(func=partial(mux, latex)) # Latex 2 p6 = sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', action=InputAction)", "= cmds[6] + '.tex' p7.add_argument('-o', '--out', action='store', default=f) p7.set_defaults(func=partial(mux, latex3)) # Incantations p8", "prefix = textwrap.dedent(r\"\"\" <!DOCTYPE html> <html style=\"background:white;\"> <head> <meta charset=\"UTF-8\"> <title>Evaluating incantations</title> <link", "name)') p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations html p10 = sp.add_parser(cmds[9], description='Produce flat html tables", "> level: self.nav += sp * self.last_level + '</ul>\\n' self.last_level -= 1 def", "the incantations') p10.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[9] p10.add_argument('-o', '--out',", "def close_nav(self, level): sp = self.sp while 
self.last_level > level: self.nav += sp", "ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] out_base = args.out", "+ '.html' p2.add_argument('-o', '--out', action='store', default=f) p2.add_argument('-d', '--diro', action='store', default='entries') p2.set_defaults(func=partial(mux, classified)) #", "('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']),", "</td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td> </td><td> </td>", "parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction) f = cmds[1] + '.html' p2.add_argument('-o', '--out', action='store', default=f)", "parents=[parent]) p5.add_argument('input', action=InputAction) f = cmds[4] + '.tex' p5.add_argument('-o', '--out', action='store', default=f) p5.set_defaults(func=partial(mux,", "= \"\" while l: chunk = l[:nl] line = sep.join(chunk) s += line", "<td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr>", "# ------------------------------------------------------------------------------ ### Used by all HTML file producers # ks: list of", "& \\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\ & \\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC}", "g): return lambda e: f(e) and g(e) # List of functions that each", "and f2(e) and f3(e) and f4(e) entry = '-' item = list(filter(f, l2))", "' <th>' + name + '</th>\\n' s += '</tr>\\n' # Process rows for", "self.s = ''.join(l) def write(self, fn): 
assert(self.s) f = open(fn, 'w') f.write(self.s) f.close()", "chip, 1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for sec, tests in lfs.items():", "while l: chunk = l[:nl] line = sep.join(chunk) s += line + ((nl", "= lambda e: L.is_shared(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW',", "def add_html(self, html): self.items.append(html) def finish(self, nav=True): self.close_nav(-1) l = [self.prefix] if nav:", "cmds = ['flat', 'classified', 'sections', 'two-level', 'latex', 'latex2', 'latex3', 'incantations', 'incantations-flat', 'incantations-html'] p", "10px; } li { padding-top: 5px; } th, td { text-align: right; padding:", "<td>11</td> <td>12</td> <td>13</td> <td>14</td> <td>15</td> <td>16</td> </tr> <tr> <td>memory&nbsp;stress</td> <td> </td><td> </td><td> </td><td>", "L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl(f, logs, n) s +=", "description='Produce tables comparing the\\ effectiveness of the incantations') p8.add_argument('input', action=InputAction, help='log (text or", "</td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\") #", "h.new_section('Other', 0) ks.sort() s = produce_table(ks, logs) h.add_html(s) h.finish() h.write(args.out) ### Two level", "combined to yield a single column # filter) fs1 = [lambda e: not", "memory c(L.is_mixed_scope, L.is_shared), # Mixed scopes, mixed memory c(L.is_mixed_scope, L.is_mixed_mem) ] return d", "+= \"\"\" </table> </div> </div> </body> </html> \"\"\" # Write table to file", "mem filters, table description, filename suffix for sf, cfg, suf in sfs: s", "general_bc, barrier, rand_threads s += ' & ' + str(entry) s += '\\\\\\\\\\n'", "file basename (instead of default name)') p8.set_defaults(func=partial(mux, incantations)) # Incantations flat p9 =", "= name.find('.') if idx != -1: name = name[:idx] s += ' 
<th>'", "if chip == chip_old: break chip_old = chip assert(type(chip) == str) # Get", "item.pos s += item.pp_cell_link_dir(2, args.diro) # Produce file containing raw litmus log item.store_log_dir(args.diro)", "assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) s = '' h = HtmlFile() filters = get_section_filters()", "name + '</th>\\n' s += '</tr>\\n' # Process rows for k in ks:", "or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl(f, logs, n) s += '\\n' #", "# Read ordinary logs (if we do not want to read an incantation", "mixed memory' ] return names # Get key patterns per axiom def get_axiom_patterns():", "'COWW']))) s += latex_tbl(f, logs, n) s += '\\n' # Produce d-cta:s-ker table,", "= lambda e: L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl2(f,", "# ############ # f: function to be called; args: arguments to the function", "and L.is_global(e), 'All threads in different CTAs, global memory', 's2-global') ] # Column", "<th>Name</th>\\n' for log in logs: # Remove directory prefix and suffix name =", "or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl(f, logs, n) s += '\\n' #", "</html> \"\"\") def __init__(self): self.items = [] self.nav = '<h4>Contents</h4>\\n' self.secn = 0", "memory f = lambda e: L.is_global(e) and (L.is_warp(e) or L.does_match(e, lc)) ks1 =", "classified(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l =", "cmds[7] p8.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default name)') p8.set_defaults(func=partial(mux,", "* (level + 1) ul = sp * (self.last_level + 1) if level", "+ '-' + suf + '.html' w_str(f_out, s) # ------------------------------------------------------------------------------ ####################### # Command", "interleave(l1, l2, n) s = fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return", "two_level)) # Latex p5 = sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', 
action=InputAction) f = cmds[4] +", "'}' + (' &' * nc) + r'\\\\' + '\\n' for t in", "e = ma.get_entry(k, logs) return e.short_name.lower() + sep + str(e.pos) l = list(map(mapper,", "= 16 # Scope and mem filters, table description, filename suffix for sf,", "non-existing tests and non-existing entries (e.g. for a certain # combination of incantations)", "= itemify(item) assert(type(item) == L) entry = item.pos s += item.pp_cell_link_dir(2, args.diro) #", "of functions that each take a log entry d = [ # Simple", "'-' item = list(filter(f, l2)) if item: item = itemify(item) assert(type(item) == L)", "appears instead of a result, it is either because optcheck failed or because", "\"Freq.\"] * n) + \"\\\\\\\\\\n\" header += '\\midrule\\n' s = header + fill_up(l,", "s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce table with sections", "the function def mux(f, args): inp = args.input l = list(listify(inp)) if hasattr(args,", "] return names # Get key patterns per axiom def get_axiom_patterns(): l =", "name, val in l: ks = ma.get_matching_keys(val, logs) if pos: ks = ma.get_pos_keys(logs,", "len(chunk)) * sep) + end l = l[nl:] return s def latex_tbl(f, logs,", "mixed memory', 'Mixed scopes, global memory', 'Mixed scopes, shared memory', 'Mixed scopes, mixed", "<center> To view the logfile for a test and chip, click on the", "'.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_flat(args): log", "and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl(f, logs, n) s", "# Process header s += '<tr>\\n' s += ' <th>Scope tree</th>\\n' s +=", "== str) # Get chip name chip = os.path.basename(log) assert(type(chip) == str) chip_old", "item.store_log_dir(args.diro) else: # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += '<td>' + str(entry)", "Names + s1 + global memory f = lambda e: 
L.is_global(e) and (L.is_warp(e)", "self.nav += ul + '<ul>\\n' self.nav += li + '<li><a href=\"#id' + str(self.secn)", "+ suf + '.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables", "chk, bail from machinery import LogEntry as L from generic import lty, interleave,", "<tr style=\"border:none\"> <td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config> </td> </tr> </table> </center> <br> <table>", "s = produce_table(ks, logs, diro=args.diro) h.add_html(s) # Rest all_matching = set(all_matching) if pos:", "= ma.get_filtered_keys(f, logs, ks) ks3.sort() assert(len(ks3) == n) for i, k in enumerate(ks3):", "r r r r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: <config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical", "# Produce d-cta:s-ker table, global memory f = lambda e: L.is_global(e) and \\", "</tr> <tr> <td>memory&nbsp;stress</td> <td> </td><td> </td><td> </td><td> </td> <td> </td><td> </td><td> </td><td> </td>", "rows for k in ks: # Start new row s += '<tr>\\n' le", "and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl2(f, logs, n) s", "inp = args.input l = list(listify(inp)) if hasattr(args, 'out'): l.append(args.out) chk(not dupchk(l), 'duplicate", "Produce d-warp:s-cta table, shared memory f = lambda e: L.is_shared(e) and \\ ((L.is_warp(e)", "i, k in enumerate(ks3): e = ma.get_entry(k, logs) l[i] += str(e.pos) + '\\\\\\\\'", "len(sys.argv) == 1: sys.argv += ['-h'] cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds = ['flat',", "incantations</h1> <br> <center> To view the logfile for a test, click on the", "in different warps, global memory', 's1-global'), (lambda e: L.is_warp(e) and L.is_shared(e), 'All threads", "((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl(f, logs, n) w_str(args.out, s) def latex2(args):", "s = s.replace('<chip>', chip, 1) l1 = list(filter(sf, 
les)) assert(lty(l1, L)) for sec,", "default=f) p1.add_argument('-d', '--diro', action='store', default='entries') p1.set_defaults(func=partial(mux, flat)) # Classified p2 = sp.add_parser(cmds[1], parents=[parent])", "do not want to read an incantation log) if f != incantations and", "entry d = [ # Simple scopes, global memory c(L.is_warp, L.is_global), c(L.is_cta, L.is_global),", "+ '\">' + link +\\ '</a></li>\\n' elif level < self.last_level: self.close_nav(level) self.nav +=", "L.is_shared), # Simple scopes, mixed memory c(L.is_warp, L.is_mixed_mem), # Mixed scopes, global memory", "of default name)') p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations html p10 = sp.add_parser(cmds[9], description='Produce flat", "# Simple scopes, global memory c(L.is_warp, L.is_global), c(L.is_cta, L.is_global), c(L.is_ker, L.is_global), # Simple", "ks = ma.get_pos_keys(logs, ks) all_matching += ks if ks: h.new_section(name, 0) s =", "item: item = itemify(item) assert(type(item) == L) entry = item.pos s += item.pp_cell_link_dir(2,", "assert(hasattr(args, 'diro')) l = get_axiom_patterns() h = HtmlFile() all_matching = [] for name,", "</td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td>", "action=InputAction, help='log (text or pickle)') f = cmds[7] p8.add_argument('-o', '--out', action='store', default=f, help='output", "+= '\\\\\\\\\\n' s += '\\\\end{tabular}\\n' # Write table to file f_out = out_base", "s += latex_tbl(f, logs, n) s += '\\n' # Produce d-cta:s-ker table, global", "ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) #", "for f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs) if pos: ks", "Parallel the above functions names = [ 'Different warps, same CTA; global memory',", 
"i1 = (i & 0b1000) >> 3 i2 = (i & 0b0100) >>", "ErrMsg, chk, bail from machinery import LogEntry as L from generic import lty,", "'corw', 'cowr', 'coww']), ('observation', ['mp', 'isa2', 'wrc']), ('prop light', ['2+2w', 'w+rw+2w', 's']), ('prop", "+= ' <th>' + name + '</th>\\n' s += '</tr>\\n' # Process rows", "+ str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif level == self.last_level +", "'Co']), ('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W',", "= get_section_names() for f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs, ks_s)", "& \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\ &", "also ignored def incantations(args): log = args.input assert(type(log) == str) # Get chip", "= args.out assert(out_base) les = log.get_all() assert(lty(les, L)) # Table header prefix =", "action=InputAction) f = cmds[1] + '.html' p2.add_argument('-o', '--out', action='store', default=f) p2.add_argument('-d', '--diro', action='store',", "ks if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) all_matching =", "same kernel; global memory', 'Different kernels, same device; global memory', 'Different warps, same", "ks_s = ma.get_pos_keys(logs) else: ks_s = ma.get_keys(logs) ks_s = set(ks_s) - all_matching ks_s", "+= '\\\\\\\\\\n' s += '\\\\hline\\n' s += '\\\\end{tabular}\\n' # Write table to file", "regions def sections(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro'))", "self.last_level + '</ul>\\n' self.last_level -= 1 def new_section(self, heading, level): assert(0 <= level", "'-' + suf + '.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat incantation", "</center> <br> <center> 
<table style=\"border:none\"> <tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\"> <chip> </td>", "effectiveness of the incantations') p10.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[9]", "s def latex_tbl2(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep = ' &", "+= li + '<li><a href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n'", "break chip_old = chip assert(type(chip) == str) log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc))", "# Remove directory prefix and suffix name = os.path.basename(log.fn) idx = name.find('.') if", "f = cmds[4] + '.tex' p5.add_argument('-o', '--out', action='store', default=f) p5.set_defaults(func=partial(mux, latex)) # Latex", "<link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head> <body> <div class=\"outer\"> <div class=\"inner\"> <h1>GPU Litmus", "= (i & 0b1000) >> 3 i2 = (i & 0b0100) >> 2", "'<tr>\\n' le = ma.get_entry(k, logs) s += le.pp_prefix(2) for log in logs: e", "filters = get_section_filters() names = get_section_names() for f, name in zip(filters, names): ks", "= args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l = get_axiom_patterns() h", "+ '.html' p4.add_argument('-o', '--out', action='store', default=f) p4.add_argument('-d', '--diro', action='store', default='entries') p4.set_defaults(func=partial(mux, two_level)) #", "'.tex' p7.add_argument('-o', '--out', action='store', default=f) p7.set_defaults(func=partial(mux, latex3)) # Incantations p8 = sp.add_parser(cmds[7], description='Produce", "] # Column filter building blocks (need to be combined to yield a", "</td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td>", "+ sep + \"Freq.\"] * n) + \"\\\\\\\\\\n\" header += 
'\\midrule\\n' s =", "\\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra Incantations:} &", "= '' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower() + sep +", "rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head> <body> <div class=\"outer\"> <div class=\"inner\"> <h1>GPU Litmus Test", "</tr> <tr style=\"border:none\"> <td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config> </td> </tr> </table> </center> <br>", "'</tr>\\n' s += '</table>\\n' return s # Filtering according to scopes and memory", "suf + '.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables def", "['CoWW', 'COWW'] ks = ma.get_matching_keys(l, logs) # Names + s1 + global memory", "diro) # Produce file containing raw litmus log e.store_log_dir(diro) else: s += '<td><a", "ks: h.new_section('Other', 0) ks.sort() s = produce_table(ks, logs) h.add_html(s) h.finish() h.write(args.out) ### Two", "c(L.is_warp, L.is_shared), # Simple scopes, mixed memory c(L.is_warp, L.is_mixed_mem), # Mixed scopes, global", "s += latex_tbl2(f, logs, n) s += '\\n' # Produce d-cta:s-ker table, global", "= (i & 0b0010) >> 1 i4 = (i & 0b0001) f1 =", "or L.does_match(e, lc)) ks1 = ma.get_filtered_keys(f, logs, ks) ks1.sort() n = len(ks1) l", "</td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td> </td><td>", "tables # All tests that are not explicitely listed under 'line filters' in", "in tests: # Get all tests that match a simple test name (like", "heading, level): assert(0 <= level <= 2) l = str(level+2) s = '<h'", "test, click on the corresponding number. 
The logfile also contains the litmus test", "cmds[0] + '.html' p1.add_argument('-o', '--out', action='store', default=f) p1.add_argument('-d', '--diro', action='store', default='entries') p1.set_defaults(func=partial(mux, flat))", "'RWC', 'IRIW' ]) ] return l # ------------------------------------------------------------------------------ ############ # Toplevel # ############", "1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce table with", "out_base + '-' + suf + '.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce", "level def close_nav(self, level): sp = self.sp while self.last_level > level: self.nav +=", "sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction) f = cmds[3] + '.html' p4.add_argument('-o', '--out', action='store',", "Prefix of output filename, default is the command name out_base = args.out assert(out_base)", "+ heading + '</a></h'\\ + l + '>\\n' self.items.append(s) self.add_nav_item(heading, level) self.secn +=", "'\\\\bottomrule\\n' return s def latex_tbl2(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep =", "dash appears instead of a result, it is either because optcheck failed or", "hasattr(args, 'out'): l.append(args.out) chk(not dupchk(l), 'duplicate files given') # Read ordinary logs (if", "log = log[0] out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L)) #", "each take a log entry d = [ # Simple scopes, global memory", "'<li><a href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif level ==", "r r r r g g g g r r r r} \\toprule", "the table # logs: list of log objects (only logs which have the", "ma.get_entry(k, logs) s += le.pp_prefix(2) for log in logs: e = log.get(k) if", "= lambda e: L.is_global(e) and (L.is_cta(e) or L.does_match(e, lc)) ks3 = ma.get_filtered_keys(f, logs,", "f: function to be called; args: arguments to the function def 
mux(f, args):", "= ma.get_logs(inp, lh=ma.Log) if not c: inp = inp[0] args.input = inp f(args)", "+ '.html' w_str(f_out, s) # ------------------------------------------------------------------------------ ####################### # Command line parser # #######################", "e: L.is_barrier(e)] fs4 = [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc =", "per location', ['CO', 'Co']), ('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation", "s += '</tr>\\n' s += \"\"\" </table> </div> </div> </body> </html> \"\"\" #", "name = name[:idx] s += ' <th>' + name + '</th>\\n' s +=", "= latex_tbl2(f, logs, n) s += '\\n' # Produce d-warp:s-cta table, shared memory", "ma.get_entry(k, logs) l.append(e.short_name.lower() + sep + str(e.pos) + sep) # s1 + shared", "short_names.sort() # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g", "pos: ks_s = ma.get_pos_keys(logs, ks_s) all_matching += ks_s if ks_s: h.new_section(name, 0) #", "l))) s += latex_tbl(f, logs, n) w_str(args.out, s) def latex2(args): pos = args.pos", "'latex3', 'incantations', 'incantations-flat', 'incantations-html'] p = get_cmdline_parser(cmds) if cmd not in cmds: p.print_help()", "generic import lty, interleave, itemify, dupchk, listify, w_str # ------------------------------------------------------------------------------ # Html file", "p10.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[9] p10.add_argument('-o', '--out', action='store', default=f,", "Results</h1> <br> <center> To view the logfile for a test and chip, click", "Now divide by other sections filters = get_section_filters() names = get_section_names() for f,", "l: list of items # sep: separator # end: end of line #", "e.short_name.lower() + sep + str(e.pos) l = list(map(mapper, ks)) header = 
sep.join([\"Test\" +", "for a test, click on the corresponding number. The logfile also contains the", "ks_s = ma.get_matching_keys(val, logs) if pos: ks_s = ma.get_pos_keys(logs, ks_s) all_matching += ks_s", "8 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]',", "'.tex' p6.add_argument('-o', '--out', action='store', default=f) p6.set_defaults(func=partial(mux, latex2)) # Latex 3 p7 = sp.add_parser(cmds[6],", "args.input assert(type(log) == str) # Get chip name chip = os.path.basename(log) assert(type(chip) ==", "incantations_flat)) # Incantations html p10 = sp.add_parser(cmds[9], description='Produce flat html tables comparing\\ the", "(' &' * nc) + r'\\\\' + '\\n' for t in tests: #", "assert(type(item) == L) entry = item.pos # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s", "} </style> </head> <body> <div class=\"outer\" style=\"width: 100%;\"> <div class=\"inner\"> <h1>Evaluating incantations</h1> <br>", "'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] # Produce d-warp:s-cta table, global memory f = lambda e:", "assert(hasattr(args, 'diro')) s = '' h = HtmlFile() filters = get_section_filters() names =", "for l in logs if l.any_key(ks) ] s = '<table>\\n' # Process header", "regions; no filtering according to # names def get_section_filters(): def c(f, g): return", "ma.get_filtered_keys(f, logs) sep = ' & ' s = '\\midrule\\n' def mapper(k): e", "sp * self.last_level + '</ul>\\n' self.last_level -= 1 def new_section(self, heading, level): assert(0", "memory', 's2-global') ] # Column filter building blocks (need to be combined to", "</td><td> </td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td>", "sep = ' & ' l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]',", "description='Produce flat html tables 
comparing\\ the effectiveness of the incantations') p10.add_argument('input', action=InputAction, help='log", "CTA; mixed memory', 'Mixed scopes, global memory', 'Mixed scopes, shared memory', 'Mixed scopes,", "= get_section_names() for f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs) if", "machinery import ErrMsg, chk, bail from machinery import LogEntry as L from generic", "def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower(), str(e.pos) l = list(map(mapper, ks))", "rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style> ul { padding-top: 10px; } li { padding-top:", "cmds[8] p9.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default name)') p9.set_defaults(func=partial(mux,", "have the key are included in the # table) def produce_table(ks, logs, diro='entries'):", "\\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\ & none", "command name out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L)) short_names =", "# Produce file containing raw litmus log item.store_log_dir(args.diro) else: # ppi_incantations: mem_stress, general_bc,", "'.html' w_str(f_out, s) # ------------------------------------------------------------------------------ ####################### # Command line parser # ####################### #", "flat)) # Classified p2 = sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction) f = cmds[1]", "</table> </center> <br> <table> <tr> <td> </td> <td>1</td> <td>2</td> <td>3</td> <td>4</td> <td>5</td> <td>6</td>", "global memory f = lambda e: L.is_global(e) and (L.is_cta(e) or L.does_match(e, lc)) ks3", "r r g g g g r r r r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\", "lambda e: L.is_global(e) and (L.is_warp(e) or L.does_match(e, lc)) ks1 = ma.get_filtered_keys(f, 
logs, ks)", "= [ l for l in logs if l.any_key(ks) ] s = '<table>\\n'", "if item: item = itemify(item) assert(type(item) == L) entry = item.pos # ppi_incantations:", "suf + '.html' w_str(f_out, s) # ------------------------------------------------------------------------------ ####################### # Command line parser #", "= sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction) f = cmds[6] + '.tex' p7.add_argument('-o', '--out', action='store',", "l[i] += str(e.pos) + sep # s2 + global memory f = lambda", "lambda e: f(e) and g(e) # List of functions that each take a", "classified)) # Sections p3 = sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction) f = cmds[2]", "latex3(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log) n = 8", "rand_threads s += '<td>' + str(entry) + '</td>' s += '</tr>\\n' s +=", "location', ['CO', 'Co']), ('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation Light',", "str)) short_names.sort() # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g", "assert(hasattr(args, 'diro')) # Get all the keys if pos: ks = ma.get_pos_keys(logs) else:", "and memory regions def sections(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log))", "<br> <center> <table style=\"border:none\"> <tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\"> <chip> </td> </tr>", "end of line # n: number of elements on line def fill_up(l, sep,", "str(level+2) s = '<h' + l + '><a id=\"id' + str(self.secn) + '\">'", "{ text-align: left; } tr:nth-child(1), tr:nth-child(5) { border-bottom: 2px solid black; } table", "fs4 = [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 #", "HtmlFile() all_matching = [] for 
name, val in l: ks = ma.get_matching_keys(val, logs)", "logs, diro=args.diro) h.add_html(s) all_matching = set(all_matching) if pos: ks = ma.get_pos_keys(logs) else: ks", "files and parse or unpickle class InputAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None):", "<div class=\"outer\" style=\"width: 100%;\"> <div class=\"inner\"> <h1>Evaluating incantations</h1> <br> <center> To view the", "'IRIW[^+]'] # Produce d-warp:s-cta table, global memory f = lambda e: L.is_global(e) and", "def incantations_flat(args): log = args.input assert(type(log) == str) chip = os.path.basename(log) assert(type(chip) ==", "for a certain # combination of incantations) are also ignored def incantations(args): log", "style=\"background:white;\"> <head> <meta charset=\"UTF-8\"> <title>Evaluating incantations</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style> ul", "action=InputAction, help='log (text or pickle)') f = cmds[8] p9.add_argument('-o', '--out', action='store', default=f, help='output", "item = list(filter(f, l2)) if item: item = itemify(item) assert(type(item) == L) entry", "column # filter) fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2", "Used by all HTML file producers # ks: list of test names to", "L.is_global(e), 'All threads in different CTAs, global memory', 's2-global') ] # Column filters", "s += '<td>' + str(entry) + '</td>' s += '</tr>\\n' s += \"\"\"", "p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations html p10 = sp.add_parser(cmds[9], description='Produce flat html tables comparing\\", "+ str(self.secn) + '\">' + link +\\ '</a></li>\\n' else: assert(False) self.last_level = level", "s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_html_flat(args): log = args.input", "a certain # combination of incantations) are also ignored def incantations(args): 
log =", "L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl2(f, logs, n) s +=", "& S & R+S & none & R & S & R+S &", "' & ' + str(entry) s += '\\\\\\\\\\n' s += '\\\\hline\\n' s +=", "action=InputAction, help='log (text or pickle)') f = cmds[9] p10.add_argument('-o', '--out', action='store', default=f, help='output", "action='store', default='entries') p4.set_defaults(func=partial(mux, two_level)) # Latex p5 = sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', action=InputAction) f", "= textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g g g r r r", "td:nth-child(1) { text-align: left; } tr:nth-child(1), tr:nth-child(5) { border-bottom: 2px solid black; }", "1) ul = sp * (self.last_level + 1) if level == self.last_level: self.nav", "default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat)) return p if __name__ == \"__main__\": if len(sys.argv) == 1:", "= HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out) # ------------------------------------------------------------------------------ ### Fill up table line by", "1) if level == self.last_level: self.nav += li + '<li><a href=\"#id' + str(self.secn)", "ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro)", "r g g g g r r r r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU", "l = [self.prefix] if nav: l += [self.nav] l += self.items + [self.suffix]", "item = itemify(item) assert(type(item) == L) entry = item.pos # ppi_incantations: mem_stress, general_bc,", "nargs='+', action=InputAction) f = cmds[3] + '.html' p4.add_argument('-o', '--out', action='store', default=f) p4.add_argument('-d', '--diro',", "write(self, fn): assert(self.s) f = open(fn, 'w') f.write(self.s) f.close() # ------------------------------------------------------------------------------ 
### Used", "------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_html_flat(args): log = args.input assert(type(log) ==", "= ['flat', 'classified', 'sections', 'two-level', 'latex', 'latex2', 'latex3', 'incantations', 'incantations-flat', 'incantations-html'] p =", "+= '\\n' # Produce d-cta:s-ker table, global memory f = lambda e: L.is_global(e)", "= ma.get_entry(k, logs) l.append(e.short_name.lower() + sep + str(e.pos) + sep) # s1 +", "'All threads in different CTAs, global memory', 's2-global') ] # Column filters fs1", "\\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\ & none & R &", "called; args: arguments to the function def mux(f, args): inp = args.input l", "interleave, itemify, dupchk, listify, w_str # ------------------------------------------------------------------------------ # Html file (including navigation and", "# Get incantation log log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1)", "import textwrap from functools import partial import machinery as ma from machinery import", "'Different kernels, same device; global memory', 'Different warps, same CTA; shared memory', 'Different", "log = log[0] # Prefix of output filename, default is the command name", "assert(lty(short_names, str)) short_names.sort() # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l", "charset=\"UTF-8\"> <title>GPU Litmus Test Results</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head> <body> <div", "i3 = (i & 0b0010) >> 1 i4 = (i & 0b0001) f1", "chip assert(type(chip) == str) log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1)", "f(e) and g(e) # List of functions that each 
take a log entry", "str(self.secn) + '\">' + heading + '</a></h'\\ + l + '>\\n' self.items.append(s) self.add_nav_item(heading,", "[ l for l in logs if l.any_key(ks) ] s = '<table>\\n' #", "['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [", "h.add_html(s) # Rest all_matching = set(all_matching) if pos: ks_s = ma.get_pos_keys(logs) else: ks_s", "in l: ks_s = ma.get_matching_keys(val, logs) if pos: ks_s = ma.get_pos_keys(logs, ks_s) all_matching", "logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce flat table with all tests def", "= set(all_matching) if pos: ks_s = ma.get_pos_keys(logs) else: ks_s = ma.get_keys(logs) ks_s =", "'s2-global') ] # Column filters fs1 = [lambda e: not L.is_mem_stress(e), lambda e:", "threads in different warps, shared memory', 's1-shared'), (lambda e: L.is_cta(e) and L.is_global(e), 'All", "+ '.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_flat(args):", "<td> </td> <td>1</td> <td>2</td> <td>3</td> <td>4</td> <td>5</td> <td>6</td> <td>7</td> <td>8</td> <td>9</td> <td>10</td> <td>11</td>", "different CTAs, global memory', 's2-global') ] # Column filter building blocks (need to", "& R+S & none & R & S & R+S\\\\ \\hline \"\"\") #", "\\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g g g r r r r g", "sections according to axioms def classified(args): pos = args.pos logs = args.input assert(lty(logs,", "# ks: list of test names to include in the table # logs:", "s = fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s ### Produce", "def incantations_html_flat(args): log = args.input assert(type(log) == str) assert(hasattr(args, 'diro')) chip = os.path.basename(log)", "= chip 
assert(type(chip) == str) log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) ==", "ks) ks2.sort() assert(len(ks2) == n) for i, k in enumerate(ks2): e = ma.get_entry(k,", "l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl2(f, logs, n) s += '\\n'", "= cmds[2] + '.html' p3.add_argument('-o', '--out', action='store', default=f) p3.add_argument('-d', '--diro', action='store', default='entries') p3.set_defaults(func=partial(mux,", "</head> <body> <div class=\"outer\"> <div class=\"inner\"> <h1>GPU Litmus Test Results</h1> <br> <center> To", "function to be called; args: arguments to the function def mux(f, args): inp", "sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s def latex_tbl2(f, logs, n): ks", "be combined to yield a single column # filter) fs1 = [lambda e:", "'Mixed scopes, global memory', 'Mixed scopes, shared memory', 'Mixed scopes, mixed memory' ]", "Mixed scopes, shared memory c(L.is_mixed_scope, L.is_shared), # Mixed scopes, mixed memory c(L.is_mixed_scope, L.is_mixed_mem)", "= 0 self.last_level = -1 def add_nav_item(self, link, level): sp = self.sp li", "4 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]',", "'\\n'.join(l) w_str(args.out, s) ### Produce latex tables def latex3(args): pos = args.pos logs", "default name)') p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations html p10 = sp.add_parser(cmds[9], description='Produce flat html", "'&#x2713;': checkmark prefix = textwrap.dedent(r\"\"\" <!DOCTYPE html> <html style=\"background:white;\"> <head> <meta charset=\"UTF-8\"> <title>Evaluating", "e: f1(e) and f2(e) and f3(e) and f4(e) entry = '-' item =", "L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl(f, logs, n) s +=", "'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] # Produce d-warp:s-cta table, global memory f =", "Latex 2 p6 = sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', 
action=InputAction) f = cmds[5] + '.tex'", "logs) l.append(e.short_name.lower() + sep + str(e.pos) + sep) # s1 + shared memory", "p8.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[7] p8.add_argument('-o', '--out', action='store', default=f,", "& ' + str(entry) s += '\\\\\\\\\\n' s += '\\\\end{tabular}\\n' # Write table", "s) ### Produce latex tables def latex3(args): pos = args.pos logs = args.input", "and mem filters, including table description and filename suffix sfs = [ (lambda", "<td>2</td> <td>3</td> <td>4</td> <td>5</td> <td>6</td> <td>7</td> <td>8</td> <td>9</td> <td>10</td> <td>11</td> <td>12</td> <td>13</td> <td>14</td>", "+= item.pp_cell_link_dir(2, args.diro) # Produce file containing raw litmus log item.store_log_dir(args.diro) else: #", "action='store_true') # Subparsers sp = p.add_subparsers(help='use <subcommand> -h for further help', title= 'subcommands')", "ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) ks = set(ks) - all_matching ks", "logs, ks) ks1.sort() n = len(ks1) l = list() for i, k in", "n) for i, k in enumerate(ks3): e = ma.get_entry(k, logs) l[i] += str(e.pos)", "incantations tables # All tests that are not explicitely listed under 'line filters'", "= list(filter(sf, les)) assert(lty(l1, L)) for t in short_names: l2 = list(filter(partial(L.simple_match, s=t),", "description='Produce flat tables comparing the\\ effectiveness of the incantations') p9.add_argument('input', action=InputAction, help='log (text", "pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l = get_axiom_patterns()", "['CO', 'Co']), ('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation Light', ['2\\+2W',", "action='store', default=f) p2.add_argument('-d', '--diro', action='store', default='entries') p2.set_defaults(func=partial(mux, classified)) # Sections p3 = sp.add_parser(cmds[2],", "= args.pos logs = args.input 
assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) # Get all the", "instead of a result, it is either because optcheck failed or because there", "f = lambda e: L.is_shared(e) and (L.is_warp(e) or L.does_match(e, lc)) ks2 = ma.get_filtered_keys(f,", "r r r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: <config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical Incantations:}", "+ str(e.pos) l = list(map(mapper, ks)) header = sep.join([\"Test\" + sep + \"Freq.\"]", "ma.get_filtered_keys(f, logs, ks_s) if pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 1)", "incantations_html_flat)) return p if __name__ == \"__main__\": if len(sys.argv) == 1: sys.argv +=", "+= sp * self.last_level + '</ul>\\n' self.last_level -= 1 def new_section(self, heading, level):", "style=\"border:none\"> <tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\"> <chip> </td> </tr> <tr style=\"border:none\"> <td", "['CoWW', 'COWW']))) s += latex_tbl(f, logs, n) s += '\\n' # Produce d-cta:s-ker", "ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) all_matching = set(all_matching) if", "litmus test results\"\"\" sp = ' ' # HTML prefix before tables prefix", "parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction) f = cmds[0] + '.html' p1.add_argument('-o', '--out', action='store', default=f)", "shared memory c(L.is_warp, L.is_shared), # Simple scopes, mixed memory c(L.is_warp, L.is_mixed_mem), # Mixed", "'cowr', 'coww']), ('observation', ['mp', 'isa2', 'wrc']), ('prop light', ['2+2w', 'w+rw+2w', 's']), ('prop heavy',", "inp = inp[0] args.input = inp f(args) ############### # Subcommands # ############### ###", "names = get_section_names() for f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs,", "to run the test. 
</center> <br> <center> <table style=\"border:none\"> <tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td>", "f = cmds[0] + '.html' p1.add_argument('-o', '--out', action='store', default=f) p1.add_argument('-d', '--diro', action='store', default='entries')", "sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s ### Produce latex tables def", "<td>9</td> <td>10</td> <td>11</td> <td>12</td> <td>13</td> <td>14</td> <td>15</td> <td>16</td> </tr> <tr> <td>memory&nbsp;stress</td> <td> </td><td>", "ks = ma.get_matching_keys(val, logs) if pos: ks = ma.get_pos_keys(logs, ks) all_matching += ks", "nl): n = len(l) s = \"\" while l: chunk = l[:nl] line", "'Different warps, same CTA; global memory', 'Different CTAs, same kernel; global memory', 'Different", "f = lambda e: L.is_global(e) and (L.is_warp(e) or L.does_match(e, lc)) ks1 = ma.get_filtered_keys(f,", "'COWW']))) s = latex_tbl2(f, logs, n) s += '\\n' # Produce d-warp:s-cta table,", "Name of test s += '<tr>\\n' s += '<td>' + t + '</td>'", "h = HtmlFile() filters = get_section_filters() names = get_section_names() for f, name in", "('uniproc', ['corr', 'corw', 'cowr', 'coww']), ('observation', ['mp', 'isa2', 'wrc']), ('prop light', ['2+2w', 'w+rw+2w',", "different warps, global memory', 's1-global'), (lambda e: L.is_warp(e) and L.is_shared(e), 'All threads in", "# l: list of items # sep: separator # end: end of line", "corresponding number. The logfile also contains the litmus test code. 
When a dash", "logs) if pos: ks = ma.get_pos_keys(logs, ks) all_matching += ks if ks: h.new_section(name,", "ks1.sort() n = len(ks1) l = list() for i, k in enumerate(ks1): e", "producers # ks: list of test names to include in the table #", "sep.join(chunk) s += line + ((nl - len(chunk)) * sep) + end l", "+ end l = l[nl:] return s def latex_tbl(f, logs, n): ks =", "threads in different warps, global memory', 's1-global'), (lambda e: L.is_warp(e) and L.is_shared(e), 'All", "i2 = (i & 0b0100) >> 2 i3 = (i & 0b0010) >>", "the key are included in the # table) def produce_table(ks, logs, diro='entries'): logs", "style=\"width: 100%;\"> <div class=\"inner\"> <h1>Evaluating incantations</h1> <br> <center> To view the logfile for", "h = HtmlFile() all_matching = [] for name, val in l: ks_s =", "args): inp = args.input l = list(listify(inp)) if hasattr(args, 'out'): l.append(args.out) chk(not dupchk(l),", "\\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g g g r r r r g g", "' <th>Memory map</th>\\n' s += ' <th>Name</th>\\n' for log in logs: # Remove", "ks = ma.get_filtered_keys(f, logs) sep = ' & ' s = '' def", "Produce d-cta:s-ker table, global memory f = lambda e: L.is_global(e) and \\ ((L.is_cta(e)", "] s = '<table>\\n' # Process header s += '<tr>\\n' s += '", "self.dest, values) def get_cmdline_parser(cmds): # Parent of all p = argparse.ArgumentParser() # Dummy", "= HtmlFile() all_matching = [] for name, val in l: ks = ma.get_matching_keys(val,", "number. The logfile also contains the litmus test code. 
When a dash appears", "<br> <center> To view the logfile for a test, click on the corresponding", "Incantations:}\\\\ & none & R & S & R+S & none & R", "'.html' p4.add_argument('-o', '--out', action='store', default=f) p4.add_argument('-d', '--diro', action='store', default='entries') p4.set_defaults(func=partial(mux, two_level)) # Latex", "= produce_table(ks, logs, diro=args.diro) h = HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out) # ------------------------------------------------------------------------------ ###", "s += r'{\\bf ' + sec + '}' + (' &' * nc)", "prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html> <head> <meta charset=\"UTF-8\"> <title>GPU Litmus Test Results</title>", "= chip assert(type(chip) == str) # Get incantation log log = ma.get_logs(log, lh=ma.LogInc)", "= [lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)] fs3 = [lambda e: not", "!= incantations_html_flat: c = type(inp) is list if not c: inp = [inp]", "= inp[0] args.input = inp f(args) ############### # Subcommands # ############### ### Produce", "c(L.is_mixed_scope, L.is_mixed_mem) ] return d def get_section_names(): # Parallel the above functions names", "output filename, default is the command name out_base = args.out assert(out_base) les =", "ks: # Start new row s += '<tr>\\n' le = ma.get_entry(k, logs) s", "log item.store_log_dir(args.diro) else: # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += '<td>' +", "!= incantations_flat and f != incantations_html_flat: c = type(inp) is list if not", "logs) return e.short_name.lower(), str(e.pos) l = list(map(mapper, ks)) l1, l2 = zip(*l) l", "and non-existing entries (e.g. 
for a certain # combination of incantations) are also", "<td>12</td> <td>13</td> <td>14</td> <td>15</td> <td>16</td> </tr> <tr> <td>memory&nbsp;stress</td> <td> </td><td> </td><td> </td><td> </td>", "= l[nl:] return s def latex_tbl(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep", "args.out assert(out_base) les = log.get_all() assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort()", "def mux(f, args): inp = args.input l = list(listify(inp)) if hasattr(args, 'out'): l.append(args.out)", "not explicitely listed under 'line filters' in this file # are ignored; non-existing", "ks) if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out)", "href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif level < self.last_level:", "[ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ]) ] return l # ------------------------------------------------------------------------------ ############ #", "def write(self, fn): assert(self.s) f = open(fn, 'w') f.write(self.s) f.close() # ------------------------------------------------------------------------------ ###", "ma.LogInc)) assert(len(log) == 1) log = log[0] # Prefix of output filename, default", "click on the corresponding number. 
The logfile also contains the litmus test code.", "(len(l2) == 0): continue # Name of test s += t for i", "= len(ks1) l = list() for i, k in enumerate(ks1): e = ma.get_entry(k,", "c(L.is_mixed_scope, L.is_shared), # Mixed scopes, mixed memory c(L.is_mixed_scope, L.is_mixed_mem) ] return d def", "args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l = get_axiom_patterns() h =", "'\\\\bottomrule\\n' return s ### Produce latex tables def latex(args): pos = args.pos logs", "names = get_section_names() for f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs)", "latex2(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log) sep = '", "shared memory', 'Different warps, same CTA; mixed memory', 'Mixed scopes, global memory', 'Mixed", "= log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r}", "and f4(e) entry = '-' item = list(filter(f, l2)) if item: item =", "+= latex_tbl2(f, logs, n) w_str(args.out, s) # ------------------------------------------------------------------------------ ### Produce incantations tables #", "assert(hasattr(args, 'diro')) chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while True:", "0b1000) >> 3 i2 = (i & 0b0100) >> 2 i3 = (i", "------------------------------------------------------------------------------ # Html file (including navigation and sections) class HtmlFile: \"\"\"Html file representing", "'coww']), ('observation', ['mp', 'isa2', 'wrc']), ('prop light', ['2+2w', 'w+rw+2w', 's']), ('prop heavy', ['sb',", "for a test and chip, click on the corresponding number. 
The logfile contains", "logs) s += le.pp_prefix(2) for log in logs: e = log.get(k) if e:", "nc) + r'\\\\' + '\\n' for t in tests: # Get all tests", "return e.short_name.lower() + sep + str(e.pos) l = list(map(mapper, ks)) header = sep.join([\"Test\"", "ma.get_keys(logs) ks_s = set(ks_s) - all_matching ks_s = list(ks_s) if ks_s: h.new_section('Other', 0)", "in enumerate(ks1): e = ma.get_entry(k, logs) l.append(e.short_name.lower() + sep + str(e.pos) + sep)", "entry = item.pos s += item.pp_cell_link_dir(2, args.diro) # Produce file containing raw litmus", "or pickle)') f = cmds[7] p8.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead", "'</ul>\\n' self.last_level -= 1 def new_section(self, heading, level): assert(0 <= level <= 2)", "</body> </html> \"\"\" # Write table to file f_out = out_base + '-'", "assert(0 <= level <= 2) l = str(level+2) s = '<h' + l", "] # Column filters fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)]", "5px; } th, td { text-align: right; padding: 5px; padding-right: 15px; padding-left: 15px;", "else: # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += '<td>' + str(entry) +", "+ link +\\ '</a></li>\\n' elif level < self.last_level: self.close_nav(level) self.nav += li +", "g g r r r r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: <config>}\\\\ \\hline", "were insufficient resources on the chip to run the test. </center> <br> <center>", "continue # Name of test s += '<tr>\\n' s += '<td>' + t", "s += '</table>\\n' return s # Filtering according to scopes and memory regions;", "lambda e: L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl(f, logs,", "the litmus test code. 
When a dash appears instead of a result, it", "'\">' + link +\\ '</a></li>\\n' else: assert(False) self.last_level = level def close_nav(self, level):", "<= 2) l = str(level+2) s = '<h' + l + '><a id=\"id'", "g g g g r r r r g g g g r", "parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true') # Subparsers sp = p.add_subparsers(help='use <subcommand> -h", "sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction) f = cmds[6] + '.tex' p7.add_argument('-o', '--out', action='store', default=f)", "+ l + '><a id=\"id' + str(self.secn) + '\">' + heading + '</a></h'\\", "optcheck failed or because there were insufficient resources on the chip to run", "memory', 'Different kernels, same device; global memory', 'Different warps, same CTA; shared memory',", "logs, n) s += '\\n' # Produce d-warp:s-cta table, shared memory f =", "import ErrMsg, chk, bail from machinery import LogEntry as L from generic import", "border-bottom: 2px solid black; } table { border-top: none; } </style> </head> <body>", "memory', 'Different CTAs, same kernel; global memory', 'Different kernels, same device; global memory',", "add_nav_item(self, link, level): sp = self.sp li = sp * (level + 1)", "name in zip(filters, names): ks = ma.get_filtered_keys(f, logs, ks_s) if pos: ks =", "Write table to file f_out = out_base + '-' + suf + '.tex'", "n): ks = ma.get_filtered_keys(f, logs) sep = ' & ' s = '\\midrule\\n'", "ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) # Rest all_matching =", "shared memory', 's1-shared'), (lambda e: L.is_cta(e) and L.is_global(e), 'All threads in different CTAs,", "padding: 5px; padding-right: 15px; padding-left: 15px; } td:nth-child(1) { text-align: left; } tr:nth-child(1),", "logs) return e.short_name.lower() + sep + str(e.pos) l = list(map(mapper, ks)) header =", "= list(ks_s) if ks_s: h.new_section('Other', 0) ks_s.sort() filters = get_section_filters() names 
= get_section_names()", "logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce table with sections according to scopes", "type=\"text/css\" media=\"screen\"/> </head> <body> <div class=\"outer\"> <div class=\"inner\"> <h1>GPU Litmus Test Results</h1> <br>", "\"\"\" </table> </div> </div> </body> </html> \"\"\" # Write table to file f_out", "(i & 0b0001) f1 = fs1[i1] f2 = fs2[i2] f3 = fs3[i3] f4", "if (len(l2) == 0): continue # Name of test s += t for", "s += ' & ' + str(entry) s += '\\\\\\\\\\n' s += '\\\\hline\\n'", "List of functions that each take a log entry d = [ #", "scopes, shared memory', 'Mixed scopes, mixed memory' ] return names # Get key", "== str) chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while True:", "shared memory', 'Mixed scopes, mixed memory' ] return names # Get key patterns", "certain # combination of incantations) are also ignored def incantations(args): log = args.input", "all tests def flat(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args,", "chip_old: break chip_old = chip assert(type(chip) == str) log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log,", "= [inp] inp = ma.get_logs(inp, lh=ma.Log) if not c: inp = inp[0] args.input", "run the test. 
</center> <br> <center> <table style=\"border:none\"> <tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td", "assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header # '&nbsp;':", "<td>15</td> <td>16</td> </tr> <tr> <td>memory&nbsp;stress</td> <td> </td><td> </td><td> </td><td> </td> <td> </td><td> </td><td>", "+= str(e.pos) + '\\\\\\\\' s = '\\n'.join(l) w_str(args.out, s) ### Produce latex tables", "contains the litmus test code, and the incantations used for the test run.", "return s def latex_tbl2(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep = '", "# ------------------------------------------------------------------------------ ### Fill up table line by line # l: list of", "# ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_flat(args): log = args.input assert(type(log)", "padding-top: 5px; } th, td { text-align: right; padding: 5px; padding-right: 15px; padding-left:", "import sys import collections import textwrap from functools import partial import machinery as", "<title>Evaluating incantations</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style> ul { padding-top: 10px; }", "l1 = list(filter(sf, les)) assert(lty(l1, L)) for sec, tests in lfs.items(): tests.sort() #", "ma.get_filtered_keys(f, logs, ks) ks1.sort() n = len(ks1) l = list() for i, k", "a result, it is either because optcheck failed or because there were insufficient", "number. 
The logfile contains the litmus test code, and the incantations used for", "all the keys if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) s", "options parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true') # Subparsers sp = p.add_subparsers(help='use <subcommand>", "ma.get_filtered_keys(f, logs) sep = ' & ' s = '' def mapper(k): e", "incantation log log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log =", "0): continue s += t for i in range(0, nc): i1 = (i", "list(map(mapper, ks)) header = sep.join([\"Test\" + sep + \"Freq.\"] * n) + \"\\\\\\\\\\n\"", "return p if __name__ == \"__main__\": if len(sys.argv) == 1: sys.argv += ['-h']", "names def get_section_filters(): def c(f, g): return lambda e: f(e) and g(e) #", "rwc) l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue", "either because optcheck failed or because there were insufficient resources on the chip", "+= '</tr>\\n' # Process rows for k in ks: # Start new row", "'out'): l.append(args.out) chk(not dupchk(l), 'duplicate files given') # Read ordinary logs (if we", "= [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Scope", "# Sections p3 = sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction) f = cmds[2] +", "c(L.is_warp, L.is_mixed_mem), # Mixed scopes, global memory c(L.is_mixed_scope, L.is_global), # Mixed scopes, shared", "</td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td> <td>", "of output filename, default is the command name out_base = args.out assert(out_base) les", "set(ks_s) - all_matching ks_s = list(ks_s) if ks_s: h.new_section('Other', 0) ks_s.sort() filters =", "diro=args.diro) 
h.add_html(s) all_matching = set(all_matching) if pos: ks = ma.get_pos_keys(logs) else: ks =", "s = s.replace('<chip>', chip, 1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for t", "warps, same CTA; global memory', 'Different CTAs, same kernel; global memory', 'Different kernels,", "s += ' <th>' + name + '</th>\\n' s += '</tr>\\n' # Process", "and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl2(f,", "assert(out_base) les = log.get_all() assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() #", "= args.out assert(out_base) les = log.get_all() assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names, str))", "CTAs, global memory', 's2-global') ] # Column filters fs1 = [lambda e: not", "(level + 1) ul = sp * (self.last_level + 1) if level ==", "Process rows for k in ks: # Start new row s += '<tr>\\n'", "self.add_nav_item(heading, level) self.secn += 1 def add_html(self, html): self.items.append(html) def finish(self, nav=True): self.close_nav(-1)", "log = args.input assert(type(log) == str) chip = os.path.basename(log) assert(type(chip) == str) chip_old", "action=InputAction) f = cmds[4] + '.tex' p5.add_argument('-o', '--out', action='store', default=f) p5.set_defaults(func=partial(mux, latex)) #", "list of log objects (only logs which have the key are included in", "nargs='+', action=InputAction) f = cmds[0] + '.html' p1.add_argument('-o', '--out', action='store', default=f) p1.add_argument('-d', '--diro',", "p10.add_argument('-d', '--diro', action='store', default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat)) return p if __name__ == \"__main__\": if", "L)) for sec, tests in lfs.items(): tests.sort() # Section header s += r'{\\bf", "l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$',", "lambda e: L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l))) 
s += latex_tbl2(f, logs,", "ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) s = produce_table(ks, logs, diro=args.diro) h", "log = args.input assert(type(log) == str) # Get chip name chip = os.path.basename(log)", "+= '\\\\bottomrule\\n' return s def latex_tbl2(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep", "else: assert(False) self.last_level = level def close_nav(self, level): sp = self.sp while self.last_level", "cmds[9] p10.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default name)') p10.add_argument('-d',", "Incantations:} & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\ & \\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS}", "e: L.is_shared(e) and (L.is_warp(e) or L.does_match(e, lc)) ks2 = ma.get_filtered_keys(f, logs, ks) ks2.sort()", "s += ' <th>Name</th>\\n' for log in logs: # Remove directory prefix and", "['sb', 'rwc', 'iriw', 'r']), ('thin air', ['lb']) ] lfs = collections.OrderedDict(lfs) for sf,", "table with sections according to axioms def classified(args): pos = args.pos logs =", "</td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td>", "1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for t in short_names: l2 =", "= out_base + '-' + suf + '.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ###", "import argparse import os import sys import collections import textwrap from functools import", "id=\"id' + str(self.secn) + '\">' + heading + '</a></h'\\ + l + '>\\n'", "mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower() + sep + str(e.pos) l =", "tables def incantations_html_flat(args): log = args.input assert(type(log) == str) assert(hasattr(args, 'diro')) 
chip =", "if (len(l2) == 0): continue # Name of test s += '<tr>\\n' s", "end l = l[nl:] return s def latex_tbl(f, logs, n): ks = ma.get_filtered_keys(f,", "1 i4 = (i & 0b0001) f1 = fs1[i1] f2 = fs2[i2] f3", "g g g r r r r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: <config>}\\\\", "if not c: inp = inp[0] args.input = inp f(args) ############### # Subcommands", "global memory', 's2-global') ] # Column filter building blocks (need to be combined", "' s = '\\midrule\\n' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower(), str(e.pos)", "= itemify(item) assert(type(item) == L) entry = item.pos # ppi_incantations: mem_stress, general_bc, barrier,", "to be called; args: arguments to the function def mux(f, args): inp =", "### Produce flat table with all tests def flat(args): pos = args.pos logs", "and L.does_match(e, l))) s += latex_tbl(f, logs, n) w_str(args.out, s) def latex2(args): pos", "<th>' + name + '</th>\\n' s += '</tr>\\n' # Process rows for k", "[ # Simple scopes, global memory c(L.is_warp, L.is_global), c(L.is_cta, L.is_global), c(L.is_ker, L.is_global), #", "ma.get_matching_keys(val, logs) if pos: ks_s = ma.get_pos_keys(logs, ks_s) all_matching += ks_s if ks_s:", "e = ma.get_entry(k, logs) l[i] += str(e.pos) + sep # s2 + global", "litmus log item.store_log_dir(args.diro) else: # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += '<td>'", "<td>1</td> <td>2</td> <td>3</td> <td>4</td> <td>5</td> <td>6</td> <td>7</td> <td>8</td> <td>9</td> <td>10</td> <td>11</td> <td>12</td> <td>13</td>", "header += '\\midrule\\n' s = header + fill_up(l, sep, '\\\\\\\\\\n', n) s +=", "'\\n' # Produce d-cta:s-ker table, global memory f = lambda e: L.is_global(e) and", "memory f = lambda e: L.is_shared(e) and (L.is_warp(e) or L.does_match(e, lc)) ks2 =", "lty, interleave, itemify, dupchk, listify, w_str # ------------------------------------------------------------------------------ # Html file 
(including navigation", "partial import machinery as ma from machinery import ErrMsg, chk, bail from machinery", "link, level): sp = self.sp li = sp * (level + 1) ul", "logs (if we do not want to read an incantation log) if f", "Test Results</h1> <br> <center> To view the logfile for a test and chip,", "cmds[2] + '.html' p3.add_argument('-o', '--out', action='store', default=f) p3.add_argument('-d', '--diro', action='store', default='entries') p3.set_defaults(func=partial(mux, sections))", "3 i2 = (i & 0b0100) >> 2 i3 = (i & 0b0010)", "+ '-' + suf + '.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat", "# All tests that are not explicitely listed under 'line filters' in this", "continue s += t for i in range(0, nc): i1 = (i &", "def two_level(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l", "' & ' s = '' def mapper(k): e = ma.get_entry(k, logs) return", "that match a simple test name (like rwc) l2 = list(filter(partial(L.simple_match, s=t), l1))", "'.html' p1.add_argument('-o', '--out', action='store', default=f) p1.add_argument('-d', '--diro', action='store', default='entries') p1.set_defaults(func=partial(mux, flat)) # Classified", "key patterns per axiom def get_axiom_patterns(): l = [ ('SC per location', ['CO',", "def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower() + sep + str(e.pos) l", "<div class=\"inner\"> <h1>GPU Litmus Test Results</h1> <br> <center> To view the logfile for", "= sp.add_parser(cmds[9], description='Produce flat html tables comparing\\ the effectiveness of the incantations') p10.add_argument('input',", "chip_old = chip assert(type(chip) == str) log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log)", "scopes, shared memory c(L.is_mixed_scope, L.is_shared), # Mixed scopes, mixed memory c(L.is_mixed_scope, L.is_mixed_mem) ]", "ma.get_filtered_keys(f, logs) if pos: ks = 
ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 0) s", "= textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html> <head> <meta charset=\"UTF-8\"> <title>GPU Litmus Test Results</title> <link", "+ '\\n' for t in tests: # Get all tests that match a", "argparse.ArgumentParser() # Dummy parent for common options parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true')", "<body> <div class=\"outer\" style=\"width: 100%;\"> <div class=\"inner\"> <h1>Evaluating incantations</h1> <br> <center> To view", "log entry d = [ # Simple scopes, global memory c(L.is_warp, L.is_global), c(L.is_cta,", "Command line parser # ####################### # Open files and parse or unpickle class", "# Get key patterns per axiom def get_axiom_patterns(): l = [ ('SC per", "range(0, nc): i1 = (i & 0b1000) >> 3 i2 = (i &", "'<tr>\\n' s += ' <th>Scope tree</th>\\n' s += ' <th>Memory map</th>\\n' s +=", "le = ma.get_entry(k, logs) s += le.pp_prefix(2) for log in logs: e =", "td { text-align: right; padding: 5px; padding-right: 15px; padding-left: 15px; } td:nth-child(1) {", "log.get_all() assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header prefix", "f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs, ks_s) if pos: ks", "= l[:nl] line = sep.join(chunk) s += line + ((nl - len(chunk)) *", "item.pos # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += ' & ' +", "logs, ks) ks3.sort() assert(len(ks3) == n) for i, k in enumerate(ks3): e =", "yield a single column # filter) fs1 = [lambda e: not L.is_mem_stress(e), lambda", "to # names def get_section_filters(): def c(f, g): return lambda e: f(e) and", "L.is_global(e), 'All threads in different warps, global memory', 's1-global'), (lambda e: L.is_warp(e) and", "default=f) p3.add_argument('-d', '--diro', action='store', default='entries') p3.set_defaults(func=partial(mux, sections)) # Two-level p4 = 
sp.add_parser(cmds[3], parents=[parent])", "assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l = get_axiom_patterns() h = HtmlFile() all_matching = []", "produce_table(ks, logs, diro=args.diro) h = HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out) # ------------------------------------------------------------------------------ ### Fill", "to file f_out = out_base + '-' + suf + '.tex' w_str(f_out, s)", "L.is_barrier(e), lambda e: L.is_barrier(e)] fs4 = [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)]", "= [ (lambda e: L.is_warp(e) and L.is_global(e), 'All threads in different warps, global", "h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce flat", "default='entries') p3.set_defaults(func=partial(mux, sections)) # Two-level p4 = sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction) f", "'><a id=\"id' + str(self.secn) + '\">' + heading + '</a></h'\\ + l +", "produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce table with sections according to", "assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] # Prefix of output filename,", "action='store', default='entries') p1.set_defaults(func=partial(mux, flat)) # Classified p2 = sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction)", "functools import partial import machinery as ma from machinery import ErrMsg, chk, bail", "test code, and the incantations used for the test run. 
</center> <br><br> \"\"\")", "ks.sort() s = produce_table(ks, logs) h.add_html(s) h.finish() h.write(args.out) ### Two level classification def", "flat(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) # Get", "Section header s += r'{\\bf ' + sec + '}' + (' &'", "p5.set_defaults(func=partial(mux, latex)) # Latex 2 p6 = sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', action=InputAction) f =", "# Subparsers sp = p.add_subparsers(help='use <subcommand> -h for further help', title= 'subcommands') #", "------------------------------------------------------------------------------ ############ # Toplevel # ############ # f: function to be called; args:", "+= ks if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) all_matching", "itemify, dupchk, listify, w_str # ------------------------------------------------------------------------------ # Html file (including navigation and sections)", "class=\"outer\"> <div class=\"inner\"> <h1>GPU Litmus Test Results</h1> <br> <center> To view the logfile", "def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) def get_cmdline_parser(cmds): # Parent", "logs) sep = ' & ' s = '' def mapper(k): e =", "Get key patterns per axiom def get_axiom_patterns(): l = [ ('SC per location',", "Produce incantations tables # All tests that are not explicitely listed under 'line", "chip, click on the corresponding number. 
The logfile contains the litmus test code,", "textwrap.dedent(r\"\"\" <!DOCTYPE html> <html style=\"background:white;\"> <head> <meta charset=\"UTF-8\"> <title>Evaluating incantations</title> <link rel=\"stylesheet\" href=\"common.css\"", "diro='entries'): logs = [ l for l in logs if l.any_key(ks) ] s", "else: s += '<td><a href=\"\">---</a></td>\\n' s += '</tr>\\n' s += '</table>\\n' return s", "functions names = [ 'Different warps, same CTA; global memory', 'Different CTAs, same", "+ str(entry) s += '\\\\\\\\\\n' s += '\\\\hline\\n' s += '\\\\end{tabular}\\n' # Write", "and suffix name = os.path.basename(log.fn) idx = name.find('.') if idx != -1: name", "and L.does_match(e, l))) s += latex_tbl2(f, logs, n) w_str(args.out, s) # ------------------------------------------------------------------------------ ###", "padding-left: 15px; } td:nth-child(1) { text-align: left; } tr:nth-child(1), tr:nth-child(5) { border-bottom: 2px", "\"\"\" # Write table to file f_out = out_base + '-' + suf", "+= ['-h'] cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds = ['flat', 'classified', 'sections', 'two-level', 'latex',", "d-cta:s-ker table, global memory f = lambda e: L.is_global(e) and \\ ((L.is_cta(e) and", "ks_s = ma.get_keys(logs) ks_s = set(ks_s) - all_matching ks_s = list(ks_s) if ks_s:", "'s2-global') ] # Column filter building blocks (need to be combined to yield", "<tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\"> <chip> </td> </tr> <tr style=\"border:none\"> <td style=\"text-align:left\">Config:</td>", "L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Line filters lfs = [", "= fs4[i4] f = lambda e: f1(e) and f2(e) and f3(e) and f4(e)", "import partial import machinery as ma from machinery import ErrMsg, chk, bail from", "f4 = fs4[i4] f = lambda e: f1(e) and f2(e) and f3(e) and", "memory c(L.is_mixed_scope, L.is_mixed_mem) ] return d def get_section_names(): # Parallel the above functions", "# Simple 
scopes, mixed memory c(L.is_warp, L.is_mixed_mem), # Mixed scopes, global memory c(L.is_mixed_scope,", "# Get chip name chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip", "''.join(l) def write(self, fn): assert(self.s) f = open(fn, 'w') f.write(self.s) f.close() # ------------------------------------------------------------------------------", "# f: function to be called; args: arguments to the function def mux(f,", "kernel; global memory', 'Different kernels, same device; global memory', 'Different warps, same CTA;", "= args.pos logs = args.input assert(type(logs) == ma.Log) n = 4 l =", "in different CTAs, global memory', 's2-global') ] # Column filters fs1 = [lambda", "memory', 's1-shared'), (lambda e: L.is_cta(e) and L.is_global(e), 'All threads in different CTAs, global", "checkmark prefix = textwrap.dedent(r\"\"\" <!DOCTYPE html> <html style=\"background:white;\"> <head> <meta charset=\"UTF-8\"> <title>Evaluating incantations</title>", "Column filters fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 =", "+ name + '</th>\\n' s += '</tr>\\n' # Process rows for k in", "& \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra", "['CoWW', 'COWW']))) s = latex_tbl(f, logs, n) s += '\\n' # Produce d-warp:s-cta", "'</th>\\n' s += '</tr>\\n' # Process rows for k in ks: # Start", "sep, end, nl): n = len(l) s = \"\" while l: chunk =", "a log entry d = [ # Simple scopes, global memory c(L.is_warp, L.is_global),", "for sf, cfg, suf in sfs: s = prefix s = s.replace('<config>', cfg,", "cmds[6] + '.tex' p7.add_argument('-o', '--out', action='store', default=f) p7.set_defaults(func=partial(mux, latex3)) # Incantations p8 =", "<td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> 
<td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td>", "5px; padding-right: 15px; padding-left: 15px; } td:nth-child(1) { text-align: left; } tr:nth-child(1), tr:nth-child(5)", "assert(type(logs) == ma.Log) n = 8 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]',", "sep = ' & ' s = '' def mapper(k): e = ma.get_entry(k,", "scopes, mixed memory' ] return names # Get key patterns per axiom def", "# s1 + shared memory f = lambda e: L.is_shared(e) and (L.is_warp(e) or", "s += '\\\\bottomrule\\n' return s def latex_tbl2(f, logs, n): ks = ma.get_filtered_keys(f, logs)", "Sections p3 = sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction) f = cmds[2] + '.html'", ">> 3 i2 = (i & 0b0100) >> 2 i3 = (i &", "<td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td>", "Simple scopes, shared memory c(L.is_warp, L.is_shared), # Simple scopes, mixed memory c(L.is_warp, L.is_mixed_mem),", "'\">' + heading + '</a></h'\\ + l + '>\\n' self.items.append(s) self.add_nav_item(heading, level) self.secn", "L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl(f, logs, n) w_str(args.out,", "e: L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl2(f, logs, n)", "scopes, mixed memory c(L.is_mixed_scope, L.is_mixed_mem) ] return d def get_section_names(): # Parallel the", "= args.pos logs = args.input assert(type(logs) == ma.Log) n = 8 l =", "= len(l) s = \"\" while l: chunk = l[:nl] line = sep.join(chunk)", "single column # filter) fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)]", "Remove directory prefix and suffix name = os.path.basename(log.fn) idx = name.find('.') if idx", 
"self.secn += 1 def add_html(self, html): self.items.append(html) def finish(self, nav=True): self.close_nav(-1) l =", "to axioms def classified(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args,", "ma.get_keys(logs) s = produce_table(ks, logs, diro=args.diro) h = HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out) #", "+ '</a></h'\\ + l + '>\\n' self.items.append(s) self.add_nav_item(heading, level) self.secn += 1 def", "+= latex_tbl(f, logs, n) w_str(args.out, s) def latex2(args): pos = args.pos logs =", "# '&nbsp;': non-breaking space # '&#x2713;': checkmark prefix = textwrap.dedent(r\"\"\" <!DOCTYPE html> <html", "= args.input assert(type(logs) == ma.Log) n = 8 l = ['CO', 'Co', 'LB[^+]',", "<br> <center> To view the logfile for a test and chip, click on", "zip(*l) l = interleave(l1, l2, n) s = fill_up(l, sep, '\\\\\\\\\\n', n) s", "'\\midrule\\n' s = header + fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return", "p7.add_argument('-o', '--out', action='store', default=f) p7.set_defaults(func=partial(mux, latex3)) # Incantations p8 = sp.add_parser(cmds[7], description='Produce tables", "of line # n: number of elements on line def fill_up(l, sep, end,", "== n) for i, k in enumerate(ks3): e = ma.get_entry(k, logs) l[i] +=", "suffix after tables suffix = textwrap.dedent(\"\"\" </div> </div> </body> </html> \"\"\") def __init__(self):", "ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish()", "tables def incantations_flat(args): log = args.input assert(type(log) == str) chip = os.path.basename(log) assert(type(chip)", "<td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td>", "logs) if pos: ks = ma.get_pos_keys(logs, ks) if ks: 
h.new_section(name, 0) s =", "not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 = [lambda e: not L.is_general_bc(e), lambda e:", "f4(e) entry = '-' item = list(filter(f, l2)) if item: item = itemify(item)", "<td>7</td> <td>8</td> <td>9</td> <td>10</td> <td>11</td> <td>12</td> <td>13</td> <td>14</td> <td>15</td> <td>16</td> </tr> <tr> <td>memory&nbsp;stress</td>", "for common options parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true') # Subparsers sp =", "f2 = fs2[i2] f3 = fs3[i3] f4 = fs4[i4] f = lambda e:", "s += line + ((nl - len(chunk)) * sep) + end l =", "it is either because optcheck failed or because there were insufficient resources on", "logs, diro=args.diro) h = HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out) # ------------------------------------------------------------------------------ ### Fill up", "def get_axiom_patterns(): l = [ ('SC per location', ['CO', 'Co']), ('No Thin Air',", "name)') p8.set_defaults(func=partial(mux, incantations)) # Incantations flat p9 = sp.add_parser(cmds[8], description='Produce flat tables comparing", "to yield a single column # filter) fs1 = [lambda e: not L.is_mem_stress(e),", "</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td>", "h.add_html(s) h.finish(nav=False) h.write(args.out) # ------------------------------------------------------------------------------ ### Fill up table line by line #", "Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy',", "line parser # ####################### # Open files and parse or unpickle class InputAction(argparse.Action):", "from machinery import LogEntry as L from generic import 
lty, interleave, itemify, dupchk,", "= textwrap.dedent(\"\"\" </div> </div> </body> </html> \"\"\") def __init__(self): self.items = [] self.nav", "L.is_global), c(L.is_ker, L.is_global), # Simple scopes, shared memory c(L.is_warp, L.is_shared), # Simple scopes,", "lambda e: L.is_barrier(e)] fs4 = [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc", "HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out) # ------------------------------------------------------------------------------ ### Fill up table line by line", "= [ ('SC per location', ['CO', 'Co']), ('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)',", "is either because optcheck failed or because there were insufficient resources on the", "all_matching = [] for name, val in l: ks = ma.get_matching_keys(val, logs) if", "diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce table with sections according to scopes and", "same device; global memory', 'Different warps, same CTA; shared memory', 'Different warps, same", "0) # Now divide by other sections filters = get_section_filters() names = get_section_names()", "R & S & R+S & none & R & S & R+S\\\\", "lfs.items(): tests.sort() # Section header s += r'{\\bf ' + sec + '}'", "not want to read an incantation log) if f != incantations and f", "table to file f_out = out_base + '-' + suf + '.html' w_str(f_out,", "scopes and memory regions def sections(args): pos = args.pos logs = args.input assert(lty(logs,", "</td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\") # Scope and mem", "incantations(args): log = args.input assert(type(log) == str) # Get chip name chip =", "s += '\\\\hline\\n' s += '\\\\end{tabular}\\n' # Write table to file f_out =", "mux(f, args): inp = args.input l = list(listify(inp)) if hasattr(args, 'out'): l.append(args.out) chk(not", "h.add_html(s) h.finish() h.write(args.out) 
### Produce flat table with all tests def flat(args): pos", "i4 = (i & 0b0001) f1 = fs1[i1] f2 = fs2[i2] f3 =", "= args.input assert(type(log) == str) # Get chip name chip = os.path.basename(log) assert(type(chip)", "'--out', action='store', default=f) p3.add_argument('-d', '--diro', action='store', default='entries') p3.set_defaults(func=partial(mux, sections)) # Two-level p4 =", "pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 0) s = produce_table(ks, logs,", "L.is_warp(e) and L.is_global(e), 'All threads in different warps, global memory', 's1-global'), (lambda e:", "log = args.input assert(type(log) == str) assert(hasattr(args, 'diro')) chip = os.path.basename(log) assert(type(chip) ==", "solid black; } table { border-top: none; } </style> </head> <body> <div class=\"outer\"", "'</a></li>\\n' elif level < self.last_level: self.close_nav(level) self.nav += li + '<li><a href=\"#id' +", "args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) # Get all the keys if pos: ks", "####################### # Open files and parse or unpickle class InputAction(argparse.Action): def __call__(self, parser,", "### Produce table with sections according to scopes and memory regions def sections(args):", "divide by other sections filters = get_section_filters() names = get_section_names() for f, name", "+ '.tex' p5.add_argument('-o', '--out', action='store', default=f) p5.set_defaults(func=partial(mux, latex)) # Latex 2 p6 =", "</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td>", "</td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td> </td><td> </td> 
<td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td>", "* self.last_level + '</ul>\\n' self.last_level -= 1 def new_section(self, heading, level): assert(0 <=", "sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', action=InputAction) f = cmds[4] + '.tex' p5.add_argument('-o', '--out', action='store', default=f)", "are also ignored def incantations(args): log = args.input assert(type(log) == str) # Get", "= cmds[0] + '.html' p1.add_argument('-o', '--out', action='store', default=f) p1.add_argument('-d', '--diro', action='store', default='entries') p1.set_defaults(func=partial(mux,", "== L) entry = item.pos s += item.pp_cell_link_dir(2, args.diro) # Produce file containing", "table with sections according to scopes and memory regions def sections(args): pos =", "l # ------------------------------------------------------------------------------ ############ # Toplevel # ############ # f: function to be", "lambda e: L.is_global(e) and (L.is_cta(e) or L.does_match(e, lc)) ks3 = ma.get_filtered_keys(f, logs, ks)", "= [ ('uniproc', ['corr', 'corw', 'cowr', 'coww']), ('observation', ['mp', 'isa2', 'wrc']), ('prop light',", "incantations') p9.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[8] p9.add_argument('-o', '--out', action='store',", "f = cmds[5] + '.tex' p6.add_argument('-o', '--out', action='store', default=f) p6.set_defaults(func=partial(mux, latex2)) # Latex", "<div class=\"inner\"> <h1>Evaluating incantations</h1> <br> <center> To view the logfile for a test,", "<body> <div class=\"outer\"> <div class=\"inner\"> <h1>GPU Litmus Test Results</h1> <br> <center> To view", "HTML file producers # ks: list of test names to include in the", "+= '</tr>\\n' s += '</table>\\n' return s # Filtering according to scopes and", "style=\"border:none\"> <td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config> </td> </tr> </table> </center> <br> <table> <tr>", "filters' in this file # are 
ignored; non-existing tests and non-existing entries (e.g.", "sec + '}' + (' &' * nc) + r'\\\\' + '\\n' for", "from generic import lty, interleave, itemify, dupchk, listify, w_str # ------------------------------------------------------------------------------ # Html", "= ma.get_keys(logs) s = produce_table(ks, logs, diro=args.diro) h = HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out)", "= lambda e: L.is_shared(e) and (L.is_warp(e) or L.does_match(e, lc)) ks2 = ma.get_filtered_keys(f, logs,", "for i, k in enumerate(ks1): e = ma.get_entry(k, logs) l.append(e.short_name.lower() + sep +", "n) s += '\\\\bottomrule\\n' return s def latex_tbl2(f, logs, n): ks = ma.get_filtered_keys(f,", "= item.pos # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += ' & '", "memory c(L.is_mixed_scope, L.is_global), # Mixed scopes, shared memory c(L.is_mixed_scope, L.is_shared), # Mixed scopes,", "lambda e: f1(e) and f2(e) and f3(e) and f4(e) entry = '-' item", "# Line filters lfs = [ ('uniproc', ['corr', 'corw', 'cowr', 'coww']), ('observation', ['mp',", "tables def latex(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log) n", "L.is_shared(e) and (L.is_warp(e) or L.does_match(e, lc)) ks2 = ma.get_filtered_keys(f, logs, ks) ks2.sort() assert(len(ks2)", "# Simple scopes, shared memory c(L.is_warp, L.is_shared), # Simple scopes, mixed memory c(L.is_warp,", "parents=[parent]) p6.add_argument('input', action=InputAction) f = cmds[5] + '.tex' p6.add_argument('-o', '--out', action='store', default=f) p6.set_defaults(func=partial(mux,", "end, nl): n = len(l) s = \"\" while l: chunk = l[:nl]", "& ' l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$',", "not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Scope and mem filters,", "for t in tests: # Get all tests that match a simple test", "results\"\"\" sp = ' ' # HTML prefix before tables prefix = textwrap.dedent(\"\"\"\\", "= 
latex_tbl(f, logs, n) s += '\\n' # Produce d-warp:s-cta table, shared memory", "the litmus test code, and the incantations used for the test run. </center>", "2) l = str(level+2) s = '<h' + l + '><a id=\"id' +", "and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl(f, logs, n) s", "logs) # Names + s1 + global memory f = lambda e: L.is_global(e)", "name = os.path.basename(log.fn) idx = name.find('.') if idx != -1: name = name[:idx]", "'<li><a href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif level <", "parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction) f = cmds[2] + '.html' p3.add_argument('-o', '--out', action='store', default=f)", "s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue s += t for", "if f != incantations and f != incantations_flat and f != incantations_html_flat: c", "no filtering according to # names def get_section_filters(): def c(f, g): return lambda", "if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) s = produce_table(ks, logs,", "s) # ------------------------------------------------------------------------------ ### Produce incantations tables # All tests that are not", "-= 1 def new_section(self, heading, level): assert(0 <= level <= 2) l =", "('Propagation Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ]) ] return l # ------------------------------------------------------------------------------", "def get_section_filters(): def c(f, g): return lambda e: f(e) and g(e) # List", "</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td>", "mem_stress, general_bc, barrier, rand_threads s += '<td>' + str(entry) + '</td>' s +=", "& ' s = '\\midrule\\n' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower(),", "= get_axiom_patterns() h = HtmlFile() 
all_matching = [] for name, val in l:", "h.new_section('Other', 0) ks_s.sort() filters = get_section_filters() names = get_section_names() for f, name in", "pos: ks = ma.get_pos_keys(logs, ks) all_matching += ks if ks: h.new_section(name, 0) s", "li = sp * (level + 1) ul = sp * (self.last_level +", "+ '\">' + link +\\ '</a></li>\\n' elif level == self.last_level + 1: self.nav", "the\\ effectiveness of the incantations') p8.add_argument('input', action=InputAction, help='log (text or pickle)') f =", "= ma.get_filtered_keys(f, logs) sep = ' & ' s = '\\midrule\\n' def mapper(k):", "header = sep.join([\"Test\" + sep + \"Freq.\"] * n) + \"\\\\\\\\\\n\" header +=", "'\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s def latex_tbl2(f, logs, n): ks =", "= [self.prefix] if nav: l += [self.nav] l += self.items + [self.suffix] self.s", "incantations') p8.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[7] p8.add_argument('-o', '--out', action='store',", "flat table with all tests def flat(args): pos = args.pos logs = args.input", "= produce_table(ks, logs) h.add_html(s) h.finish() h.write(args.out) ### Two level classification def two_level(args): pos", "n) s += '\\n' # Produce d-cta:s-ker table, global memory f = lambda", "are not explicitely listed under 'line filters' in this file # are ignored;", "sfs = [ (lambda e: L.is_warp(e) and L.is_global(e), 'All threads in different warps,", "'--diro', action='store', default='entries') p3.set_defaults(func=partial(mux, sections)) # Two-level p4 = sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+',", "<br><br> \"\"\") # HTML suffix after tables suffix = textwrap.dedent(\"\"\" </div> </div> </body>", "e = ma.get_entry(k, logs) return e.short_name.lower(), str(e.pos) l = list(map(mapper, ks)) l1, l2", "[ (lambda e: L.is_warp(e) and L.is_global(e), 'All threads in different warps, global memory',", "</tr> <tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> 
</td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td>", "effectiveness of the incantations') p8.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[7]", "memory regions; no filtering according to # names def get_section_filters(): def c(f, g):", "l = list(listify(inp)) if hasattr(args, 'out'): l.append(args.out) chk(not dupchk(l), 'duplicate files given') #", "1) log = log[0] out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L))", "fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s ### Produce latex tables", "# Scope and mem filters, table description, filename suffix for sf, cfg, suf", "l[nl:] return s def latex_tbl(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep =", "tests that are not explicitely listed under 'line filters' in this file #", "self.close_nav(-1) l = [self.prefix] if nav: l += [self.nav] l += self.items +", "'WRC', 'ISA2']), ('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC',", "explicitely listed under 'line filters' in this file # are ignored; non-existing tests", "g g g r r r r g g g g r r", "sp = self.sp li = sp * (level + 1) ul = sp", "Produce flat incantation tables def incantations_flat(args): log = args.input assert(type(log) == str) chip", "<meta charset=\"UTF-8\"> <title>Evaluating incantations</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style> ul { padding-top:", "want to read an incantation log) if f != incantations and f !=", "<th>Memory map</th>\\n' s += ' <th>Name</th>\\n' for log in logs: # Remove directory", "Incantations:} & \\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\ & \\multicolumn{4}{h}{none} &", "+= '<tr>\\n' s += ' <th>Scope tree</th>\\n' s += ' 
<th>Memory map</th>\\n' s", "if __name__ == \"__main__\": if len(sys.argv) == 1: sys.argv += ['-h'] cmd =", "------------------------------------------------------------------------------ ### Produce incantations tables # All tests that are not explicitely listed", "'</a></li>\\n' else: assert(False) self.last_level = level def close_nav(self, level): sp = self.sp while", "le.pp_prefix(2) for log in logs: e = log.get(k) if e: s += e.pp_cell_link_dir(2,", "= argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true') # Subparsers sp = p.add_subparsers(help='use <subcommand> -h for", "logfile contains the litmus test code, and the incantations used for the test", "L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl2(f, logs, n) w_str(args.out,", "ks_s if ks_s: h.new_section(name, 0) # Now divide by other sections filters =", "<td>10</td> <td>11</td> <td>12</td> <td>13</td> <td>14</td> <td>15</td> <td>16</td> </tr> <tr> <td>memory&nbsp;stress</td> <td> </td><td> </td><td>", "p5 = sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', action=InputAction) f = cmds[4] + '.tex' p5.add_argument('-o', '--out',", "' <th>Name</th>\\n' for log in logs: # Remove directory prefix and suffix name", "memory regions def sections(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args,", "classification def two_level(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro'))", "</style> </head> <body> <div class=\"outer\" style=\"width: 100%;\"> <div class=\"inner\"> <h1>Evaluating incantations</h1> <br> <center>", "'<li><a href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n' else: assert(False) self.last_level", "action='store', default=f) p6.set_defaults(func=partial(mux, latex2)) # Latex 3 p7 = sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction)", "pickle)') f = cmds[8] p9.add_argument('-o', '--out', 
action='store', default=f, help='output file basename (instead of", "h.write(args.out) # ------------------------------------------------------------------------------ ### Fill up table line by line # l: list", "= list(map(mapper, ks)) header = sep.join([\"Test\" + sep + \"Freq.\"] * n) +", "tests that match a simple test name (like rwc) l2 = list(filter(partial(L.simple_match, s=t),", "of default name)') p8.set_defaults(func=partial(mux, incantations)) # Incantations flat p9 = sp.add_parser(cmds[8], description='Produce flat", "L.does_match(e, lc)) ks2 = ma.get_filtered_keys(f, logs, ks) ks2.sort() assert(len(ks2) == n) for i,", "'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ]) ] return l # ------------------------------------------------------------------------------ ############ # Toplevel", "+ '\\\\\\\\' s = '\\n'.join(l) w_str(args.out, s) ### Produce latex tables def latex3(args):", "p4 = sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction) f = cmds[3] + '.html' p4.add_argument('-o',", "f_out = out_base + '-' + suf + '.html' w_str(f_out, s) # ------------------------------------------------------------------------------", "with sections according to axioms def classified(args): pos = args.pos logs = args.input", "15px; padding-left: 15px; } td:nth-child(1) { text-align: left; } tr:nth-child(1), tr:nth-child(5) { border-bottom:", "</table> </div> </div> </body> </html> \"\"\" # Write table to file f_out =", "ma.Log)) assert(hasattr(args, 'diro')) l = get_axiom_patterns() h = HtmlFile() all_matching = [] for", "for further help', title= 'subcommands') # Flat p1 = sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+',", "values) def get_cmdline_parser(cmds): # Parent of all p = argparse.ArgumentParser() # Dummy parent", "f.close() # ------------------------------------------------------------------------------ ### Used by all HTML file producers # ks: list", "e: L.is_global(e) and \\ ((L.is_cta(e) and 
L.does_match(e, l))) s += latex_tbl(f, logs, n)", "</tr> </table> </center> <br> <table> <tr> <td> </td> <td>1</td> <td>2</td> <td>3</td> <td>4</td> <td>5</td>", "+= r'{\\bf ' + sec + '}' + (' &' * nc) +", "by all HTML file producers # ks: list of test names to include", "not L.is_barrier(e), lambda e: L.is_barrier(e)] fs4 = [lambda e: not L.is_rand_threads(e), lambda e:", "for t in short_names: l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2)", "pos = args.pos logs = args.input assert(type(logs) == ma.Log) sep = ' &", "['2+2w', 'w+rw+2w', 's']), ('prop heavy', ['sb', 'rwc', 'iriw', 'r']), ('thin air', ['lb']) ]", "global memory c(L.is_mixed_scope, L.is_global), # Mixed scopes, shared memory c(L.is_mixed_scope, L.is_shared), # Mixed", "and f != incantations_html_flat: c = type(inp) is list if not c: inp", "Results</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head> <body> <div class=\"outer\"> <div class=\"inner\"> <h1>GPU", "+ r'\\\\' + '\\n' for t in tests: # Get all tests that", "s = '\\midrule\\n' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower(), str(e.pos) l", "of test names to include in the table # logs: list of log", "on the corresponding number. The logfile also contains the litmus test code. 
When", "'<td><a href=\"\">---</a></td>\\n' s += '</tr>\\n' s += '</table>\\n' return s # Filtering according", "+ str(e.pos) + sep) # s1 + shared memory f = lambda e:", "fs3[i3] f4 = fs4[i4] f = lambda e: f1(e) and f2(e) and f3(e)", "# n: number of elements on line def fill_up(l, sep, end, nl): n", "' ' # HTML prefix before tables prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html>", "### Fill up table line by line # l: list of items #", "get_section_names(): # Parallel the above functions names = [ 'Different warps, same CTA;", "'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] #", "= log.get_all() assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header", "</td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\") # Scope and mem filters, including table description and", "comparing the\\ effectiveness of the incantations') p9.add_argument('input', action=InputAction, help='log (text or pickle)') f", "latex_tbl(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep = ' & ' s", "href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style> ul { padding-top: 10px; } li { padding-top: 5px;", "s = produce_table(ks, logs, diro=args.diro) h = HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out) # ------------------------------------------------------------------------------", "patterns per axiom def get_axiom_patterns(): l = [ ('SC per location', ['CO', 'Co']),", "(L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl(f, logs, n) s += '\\n' # Produce", "ma.Log) n = 4 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]',", "(self.last_level + 1) if level == self.last_level: self.nav += li + '<li><a href=\"#id'", "memory', 'Mixed scopes, shared memory', 'Mixed scopes, mixed memory' ] return names #", "'\\n' # Produce d-warp:s-cta table, shared memory f = lambda e: L.is_shared(e) and", "def 
latex2(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log) sep =", "1: self.nav += ul + '<ul>\\n' self.nav += li + '<li><a href=\"#id' +", "h.finish() h.write(args.out) ### Two level classification def two_level(args): pos = args.pos logs =", "= set(all_matching) if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) ks =", "['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ]) ] return", "'All threads in different CTAs, global memory', 's2-global') ] # Column filter building", "and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl(f,", "+ '.tex' p7.add_argument('-o', '--out', action='store', default=f) p7.set_defaults(func=partial(mux, latex3)) # Incantations p8 = sp.add_parser(cmds[7],", "'\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s ### Produce latex tables def latex(args):", "ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish()", "style=\"text-align:left\"> <chip> </td> </tr> <tr style=\"border:none\"> <td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config> </td> </tr>", "flat tables comparing the\\ effectiveness of the incantations') p9.add_argument('input', action=InputAction, help='log (text or", "scopes, global memory c(L.is_warp, L.is_global), c(L.is_cta, L.is_global), c(L.is_ker, L.is_global), # Simple scopes, shared", "global memory f = lambda e: L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e, l)))", "= (i & 0b0100) >> 2 i3 = (i & 0b0010) >> 1", "all p = argparse.ArgumentParser() # Dummy parent for common options parent = argparse.ArgumentParser(add_help=False)", "HtmlFile() filters = get_section_filters() names = get_section_names() for f, name in zip(filters, names):", "'\">' + link +\\ '</a></li>\\n' elif level < self.last_level: self.close_nav(level) self.nav += li", "Table header prefix = 
textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g g g", "python3 import argparse import os import sys import collections import textwrap from functools", "+ '<li><a href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif level", "= list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue s +=", "0b0001) f1 = fs1[i1] f2 = fs2[i2] f3 = fs3[i3] f4 = fs4[i4]", "insufficient resources on the chip to run the test. </center> <br> <center> <table", "tables prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html> <head> <meta charset=\"UTF-8\"> <title>GPU Litmus Test", "2 i3 = (i & 0b0010) >> 1 i4 = (i & 0b0001)", "Produce file containing raw litmus log e.store_log_dir(diro) else: s += '<td><a href=\"\">---</a></td>\\n' s", "g g r r r r g g g g r r r", "= args.input assert(type(logs) == ma.Log) sep = ' & ' l = ['CO',", "ks_s) if pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 1) s =", "= ma.get_entry(k, logs) return e.short_name.lower() + sep + str(e.pos) l = list(map(mapper, ks))", "action=InputAction) f = cmds[0] + '.html' p1.add_argument('-o', '--out', action='store', default=f) p1.add_argument('-d', '--diro', action='store',", "and (L.is_warp(e) or L.does_match(e, lc)) ks1 = ma.get_filtered_keys(f, logs, ks) ks1.sort() n =", "action='store', default=f, help='output file basename (instead of default name)') p8.set_defaults(func=partial(mux, incantations)) # Incantations", "with sections according to scopes and memory regions def sections(args): pos = args.pos", "+ l + '>\\n' self.items.append(s) self.add_nav_item(heading, level) self.secn += 1 def add_html(self, html):", "return s ### Produce latex tables def latex(args): pos = args.pos logs =", "return s def latex_tbl(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep = '", "l))) s += latex_tbl2(f, logs, n) w_str(args.out, s) # 
------------------------------------------------------------------------------ ### Produce incantations", "== str) chip_old = chip while True: chip = os.path.splitext(chip)[0] if chip ==", "str) assert(hasattr(args, 'diro')) chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while", "</td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\") # Scope and mem filters, including table", "action='store', default=f) p4.add_argument('-d', '--diro', action='store', default='entries') p4.set_defaults(func=partial(mux, two_level)) # Latex p5 = sp.add_parser(cmds[4],", "+\\ '</a></li>\\n' elif level == self.last_level + 1: self.nav += ul + '<ul>\\n'", "n): ks = ma.get_filtered_keys(f, logs) sep = ' & ' s = ''", "list(filter(sf, les)) assert(lty(l1, L)) for sec, tests in lfs.items(): tests.sort() # Section header", "Mixed scopes, mixed memory c(L.is_mixed_scope, L.is_mixed_mem) ] return d def get_section_names(): # Parallel", "finish(self, nav=True): self.close_nav(-1) l = [self.prefix] if nav: l += [self.nav] l +=", "pos = args.pos logs = args.input assert(type(logs) == ma.Log) n = 4 l", "# Mixed scopes, shared memory c(L.is_mixed_scope, L.is_shared), # Mixed scopes, mixed memory c(L.is_mixed_scope,", "args.input assert(type(logs) == ma.Log) n = 8 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]',", "memory f = lambda e: L.is_global(e) and (L.is_cta(e) or L.does_match(e, lc)) ks3 =", "s += '</tr>\\n' s += '</table>\\n' return s # Filtering according to scopes", "get_axiom_patterns() h = HtmlFile() all_matching = [] for name, val in l: ks", "sec, tests in lfs.items(): tests.sort() # Section header s += r'{\\bf ' +", "ks_s.sort() filters = get_section_filters() names = get_section_names() for f, name in zip(filters, names):", "link +\\ '</a></li>\\n' else: assert(False) self.last_level = level def close_nav(self, level): sp =", "& R & S & R+S & none & R & S &", "representing litmus test results\"\"\" sp = ' ' # HTML prefix before tables", 
"+ 1: self.nav += ul + '<ul>\\n' self.nav += li + '<li><a href=\"#id'", "fs2 = [lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)] fs3 = [lambda e:", "& 0b0010) >> 1 i4 = (i & 0b0001) f1 = fs1[i1] f2", "l = l[nl:] return s def latex_tbl(f, logs, n): ks = ma.get_filtered_keys(f, logs)", "<center> To view the logfile for a test, click on the corresponding number.", "& none & R & S & R+S\\\\ \\hline \"\"\") # Scope and", "'.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_html_flat(args): log", "p6.add_argument('input', action=InputAction) f = cmds[5] + '.tex' p6.add_argument('-o', '--out', action='store', default=f) p6.set_defaults(func=partial(mux, latex2))", "incantations_flat(args): log = args.input assert(type(log) == str) chip = os.path.basename(log) assert(type(chip) == str)", "'diro')) l = get_axiom_patterns() h = HtmlFile() all_matching = [] for name, val", "{ padding-top: 5px; } th, td { text-align: right; padding: 5px; padding-right: 15px;", "= produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce flat table with all", "and (L.is_warp(e) or L.does_match(e, lc)) ks2 = ma.get_filtered_keys(f, logs, ks) ks2.sort() assert(len(ks2) ==", "R+S & none & R & S & R+S & none & R", "'--diro', action='store', default='entries') p4.set_defaults(func=partial(mux, two_level)) # Latex p5 = sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', action=InputAction)", "if l.any_key(ks) ] s = '<table>\\n' # Process header s += '<tr>\\n' s", "</tr> <tr> <td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td>", "combination of incantations) are also ignored def incantations(args): log = args.input assert(type(log) ==", "default=f) p7.set_defaults(func=partial(mux, 
latex3)) # Incantations p8 = sp.add_parser(cmds[7], description='Produce tables comparing the\\ effectiveness", "return lambda e: f(e) and g(e) # List of functions that each take", "f = cmds[7] p8.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default", "if cmd not in cmds: p.print_help() sys.exit(2) print('cmd: ' + cmd) pr =", "to include in the table # logs: list of log objects (only logs", "description and filename suffix sfs = [ (lambda e: L.is_warp(e) and L.is_global(e), 'All", "'IRIW[^+]'] lc = ['CoWW', 'COWW'] ks = ma.get_matching_keys(l, logs) # Names + s1", "args.pos logs = args.input assert(type(logs) == ma.Log) n = 4 l = ['CO',", "s += '\\\\\\\\\\n' s += '\\\\end{tabular}\\n' # Write table to file f_out =", "== L) entry = item.pos # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s +=", "= self.sp while self.last_level > level: self.nav += sp * self.last_level + '</ul>\\n'", "f != incantations and f != incantations_flat and f != incantations_html_flat: c =", "= s.replace('<chip>', chip, 1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for t in", "log in logs: # Remove directory prefix and suffix name = os.path.basename(log.fn) idx", "l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue s", "if ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ###", "logs, ks_s) if pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 1) s", "i, k in enumerate(ks2): e = ma.get_entry(k, logs) l[i] += str(e.pos) + sep", "+ link +\\ '</a></li>\\n' else: assert(False) self.last_level = level def close_nav(self, level): sp", "# List of functions that each take a log entry d = [", "\\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}", 
"p10.set_defaults(func=partial(mux, incantations_html_flat)) return p if __name__ == \"__main__\": if len(sys.argv) == 1: sys.argv", "and g(e) # List of functions that each take a log entry d", "l1 = list(filter(sf, les)) assert(lty(l1, L)) for t in short_names: l2 = list(filter(partial(L.simple_match,", "tests in lfs.items(): tests.sort() # Section header s += r'{\\bf ' + sec", "100%;\"> <div class=\"inner\"> <h1>Evaluating incantations</h1> <br> <center> To view the logfile for a", "p5.add_argument('-o', '--out', action='store', default=f) p5.set_defaults(func=partial(mux, latex)) # Latex 2 p6 = sp.add_parser(cmds[5], parents=[parent])", "ks)) l1, l2 = zip(*l) l = interleave(l1, l2, n) s = fill_up(l,", "'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc = ['CoWW', 'COWW'] ks = ma.get_matching_keys(l,", "<html style=\"background:white;\"> <head> <meta charset=\"UTF-8\"> <title>Evaluating incantations</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style>", "latex_tbl(f, logs, n) s += '\\n' # Produce d-cta:s-ker table, global memory f", "Latex p5 = sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', action=InputAction) f = cmds[4] + '.tex' p5.add_argument('-o',", "<td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td>", "to scopes and memory regions def sections(args): pos = args.pos logs = args.input", "up table line by line # l: list of items # sep: separator", "(like rwc) l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0):", "pickle)') f = cmds[9] p10.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of", "+= '<td><a href=\"\">---</a></td>\\n' s += '</tr>\\n' s += '</table>\\n' return s # Filtering", "<tr> 
<td> </td> <td>1</td> <td>2</td> <td>3</td> <td>4</td> <td>5</td> <td>6</td> <td>7</td> <td>8</td> <td>9</td> <td>10</td>", "# Scope and mem filters, including table description and filename suffix sfs =", "p9.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[8] p9.add_argument('-o', '--out', action='store', default=f,", "list(filter(f, l2)) if item: item = itemify(item) assert(type(item) == L) entry = item.pos", "# Rest all_matching = set(all_matching) if pos: ks_s = ma.get_pos_keys(logs) else: ks_s =", "les)) assert(lty(l1, L)) for sec, tests in lfs.items(): tests.sort() # Section header s", "table, global memory f = lambda e: L.is_global(e) and \\ ((L.is_warp(e) and L.does_match(e,", "ma.get_filtered_keys(f, logs, ks) ks2.sort() assert(len(ks2) == n) for i, k in enumerate(ks2): e", "self.last_level: self.close_nav(level) self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">' +", "args: arguments to the function def mux(f, args): inp = args.input l =", "if nav: l += [self.nav] l += self.items + [self.suffix] self.s = ''.join(l)", "help='output file basename (instead of default name)') p10.add_argument('-d', '--diro', action='store', default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat))", "############ # Toplevel # ############ # f: function to be called; args: arguments", "ks3 = ma.get_filtered_keys(f, logs, ks) ks3.sort() assert(len(ks3) == n) for i, k in", "L.does_match(e, l))) s += latex_tbl2(f, logs, n) w_str(args.out, s) # ------------------------------------------------------------------------------ ### Produce", "flat p9 = sp.add_parser(cmds[8], description='Produce flat tables comparing the\\ effectiveness of the incantations')", "Toplevel # ############ # f: function to be called; args: arguments to the", "items # sep: separator # end: end of line # n: number of", "['-h'] cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds = ['flat', 'classified', 'sections', 'two-level', 'latex', 
'latex2',", "len(l) s = \"\" while l: chunk = l[:nl] line = sep.join(chunk) s", "+ \"Freq.\"] * n) + \"\\\\\\\\\\n\" header += '\\midrule\\n' s = header +", "the effectiveness of the incantations') p10.add_argument('input', action=InputAction, help='log (text or pickle)') f =", "assert(lty(l1, L)) for sec, tests in lfs.items(): tests.sort() # Section header s +=", "list(filter(sf, les)) assert(lty(l1, L)) for t in short_names: l2 = list(filter(partial(L.simple_match, s=t), l1))", "return d def get_section_names(): # Parallel the above functions names = [ 'Different", "<tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td>", "h = HtmlFile() h.add_html(s) h.finish(nav=False) h.write(args.out) # ------------------------------------------------------------------------------ ### Fill up table line", "# Flat p1 = sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction) f = cmds[0] +", "] return l # ------------------------------------------------------------------------------ ############ # Toplevel # ############ # f: function", "(need to be combined to yield a single column # filter) fs1 =", "table, shared memory f = lambda e: L.is_shared(e) and \\ ((L.is_warp(e) and L.does_match(e,", "table line by line # l: list of items # sep: separator #", "short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header # '&nbsp;': non-breaking space", "# Now divide by other sections filters = get_section_filters() names = get_section_names() for", "s = \"\" while l: chunk = l[:nl] line = sep.join(chunk) s +=", "'All threads in different warps, global memory', 's1-global'), (lambda e: L.is_warp(e) and L.is_shared(e),", "and memory regions; no filtering according to # names def get_section_filters(): def c(f,", "scopes, global memory', 
'Mixed scopes, shared memory', 'Mixed scopes, mixed memory' ] return", "all_matching ks = list(ks) if ks: h.new_section('Other', 0) ks.sort() s = produce_table(ks, logs)", "inp = ma.get_logs(inp, lh=ma.Log) if not c: inp = inp[0] args.input = inp", "header + fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s def latex_tbl2(f,", "= lambda e: f1(e) and f2(e) and f3(e) and f4(e) entry = '-'", "(len(l2) == 0): continue # Name of test s += '<tr>\\n' s +=", "+= '\\\\end{tabular}\\n' # Write table to file f_out = out_base + '-' +", "header # '&nbsp;': non-breaking space # '&#x2713;': checkmark prefix = textwrap.dedent(r\"\"\" <!DOCTYPE html>", "= ma.get_matching_keys(val, logs) if pos: ks = ma.get_pos_keys(logs, ks) all_matching += ks if", "= args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) s = '' h = HtmlFile() filters", "def c(f, g): return lambda e: f(e) and g(e) # List of functions", "str) chip_old = chip while True: chip = os.path.splitext(chip)[0] if chip == chip_old:", "('thin air', ['lb']) ] lfs = collections.OrderedDict(lfs) for sf, cfg, suf in sfs:", "entry = '-' item = list(filter(f, l2)) if item: item = itemify(item) assert(type(item)", "Name of test s += t for i in range(0, nc): i1 =", "the incantations') p9.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[8] p9.add_argument('-o', '--out',", "sep + \"Freq.\"] * n) + \"\\\\\\\\\\n\" header += '\\midrule\\n' s = header", "sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds = ['flat', 'classified', 'sections', 'two-level', 'latex', 'latex2', 'latex3', 'incantations', 'incantations-flat',", "= fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s ### Produce latex", "</td><td>&#x2713;</td> </tr> \"\"\") # Scope and mem filters, including table description and filename", "L) entry = item.pos # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += '", "Open files and parse or unpickle class InputAction(argparse.Action): 
def __call__(self, parser, namespace, values,", "help', title= 'subcommands') # Flat p1 = sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction) f", "r'\\\\' + '\\n' for t in tests: # Get all tests that match", "= lambda e: L.is_global(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW',", "\\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl(f, logs,", "return s # Filtering according to scopes and memory regions; no filtering according", "if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) all_matching = set(all_matching)", "= os.path.splitext(chip)[0] if chip == chip_old: break chip_old = chip assert(type(chip) == str)", "s = prefix s = s.replace('<config>', cfg, 1) s = s.replace('<chip>', chip, 1)", "mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower(), str(e.pos) l = list(map(mapper, ks)) l1,", "building blocks (need to be combined to yield a single column # filter)", "\\hline \"\"\") # Scope and mem filters, including table description and filename suffix", "+= '<tr>\\n' le = ma.get_entry(k, logs) s += le.pp_prefix(2) for log in logs:", "link +\\ '</a></li>\\n' elif level < self.last_level: self.close_nav(level) self.nav += li + '<li><a", "for f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs, ks_s) if pos:", "chip assert(type(chip) == str) # Get incantation log log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log,", "+\\ '</a></li>\\n' elif level < self.last_level: self.close_nav(level) self.nav += li + '<li><a href=\"#id'", "chip, 1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for t in short_names: l2", "action='store', default=f) p7.set_defaults(func=partial(mux, latex3)) # Incantations p8 = sp.add_parser(cmds[7], description='Produce tables comparing the\\", "short_names: l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue", 
"c(L.is_mixed_scope, L.is_global), # Mixed scopes, shared memory c(L.is_mixed_scope, L.is_shared), # Mixed scopes, mixed", "and parse or unpickle class InputAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace,", "tables comparing the\\ effectiveness of the incantations') p8.add_argument('input', action=InputAction, help='log (text or pickle)')", "= sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction) f = cmds[3] + '.html' p4.add_argument('-o', '--out',", "space # '&#x2713;': checkmark prefix = textwrap.dedent(r\"\"\" <!DOCTYPE html> <html style=\"background:white;\"> <head> <meta", "textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g g g r r r r", "name, val in l: ks_s = ma.get_matching_keys(val, logs) if pos: ks_s = ma.get_pos_keys(logs,", "p9.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default name)') p9.set_defaults(func=partial(mux, incantations_flat))", "enumerate(ks1): e = ma.get_entry(k, logs) l.append(e.short_name.lower() + sep + str(e.pos) + sep) #", "names to include in the table # logs: list of log objects (only", "lambda e: L.is_shared(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW'])))", "L.is_global(e), 'All threads in different CTAs, global memory', 's2-global') ] # Column filter", "name.find('.') if idx != -1: name = name[:idx] s += ' <th>' +", "sp = ' ' # HTML prefix before tables prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE", "2px solid black; } table { border-top: none; } </style> </head> <body> <div", "<meta charset=\"UTF-8\"> <title>GPU Litmus Test Results</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head> <body>", "latex tables def latex3(args): pos = args.pos logs = args.input assert(type(logs) == 
ma.Log)", "which have the key are included in the # table) def produce_table(ks, logs,", "</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>", "break chip_old = chip assert(type(chip) == str) # Get incantation log log =", "s.replace('<config>', cfg, 1) s = s.replace('<chip>', chip, 1) l1 = list(filter(sf, les)) assert(lty(l1,", "0): continue # Name of test s += '<tr>\\n' s += '<td>' +", "1) log = log[0] # Prefix of output filename, default is the command", "+= '</table>\\n' return s # Filtering according to scopes and memory regions; no", "'RWC[^+]', 'IRIW[^+]'] # Produce d-warp:s-cta table, global memory f = lambda e: L.is_global(e)", "h.add_html(s) h.finish() h.write(args.out) ### Produce table with sections according to scopes and memory", "w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_flat(args): log =", "'<h4>Contents</h4>\\n' self.secn = 0 self.last_level = -1 def add_nav_item(self, link, level): sp =", "((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl2(f, logs, n)", "p6 = sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', action=InputAction) f = cmds[5] + '.tex' p6.add_argument('-o', '--out',", "l for l in logs if l.any_key(ks) ] s = '<table>\\n' # Process", "ks) ks3.sort() assert(len(ks3) == n) for i, k in enumerate(ks3): e = ma.get_entry(k,", "run. 
</center> <br><br> \"\"\") # HTML suffix after tables suffix = textwrap.dedent(\"\"\" </div>", "of log objects (only logs which have the key are included in the", "p1.set_defaults(func=partial(mux, flat)) # Classified p2 = sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction) f =", "n = 8 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]',", "ma.get_filtered_keys(f, logs, ks) ks3.sort() assert(len(ks3) == n) for i, k in enumerate(ks3): e", "p8 = sp.add_parser(cmds[7], description='Produce tables comparing the\\ effectiveness of the incantations') p8.add_argument('input', action=InputAction,", "'Different warps, same CTA; shared memory', 'Different warps, same CTA; mixed memory', 'Mixed", "== ma.Log) sep = ' & ' l = ['CO', 'Co', 'LB[^+]', 'MP[^+]',", "'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc = ['CoWW', 'COWW'] ks", "l.append(e.short_name.lower() + sep + str(e.pos) + sep) # s1 + shared memory f", "if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) ks = set(ks) -", "sections) class HtmlFile: \"\"\"Html file representing litmus test results\"\"\" sp = ' '", "action=InputAction) f = cmds[3] + '.html' p4.add_argument('-o', '--out', action='store', default=f) p4.add_argument('-d', '--diro', action='store',", "= [] for name, val in l: ks_s = ma.get_matching_keys(val, logs) if pos:", "import collections import textwrap from functools import partial import machinery as ma from", "title= 'subcommands') # Flat p1 = sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction) f =", "chip_old = chip assert(type(chip) == str) # Get incantation log log = ma.get_logs(log,", "parent for common options parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true') # Subparsers sp", "s ### Produce latex tables def latex(args): pos = args.pos logs = 
args.input", "level): sp = self.sp while self.last_level > level: self.nav += sp * self.last_level", "sp.add_parser(cmds[7], description='Produce tables comparing the\\ effectiveness of the incantations') p8.add_argument('input', action=InputAction, help='log (text", "logs) h.add_html(s) h.finish() h.write(args.out) ### Two level classification def two_level(args): pos = args.pos", "memory', 'Different warps, same CTA; mixed memory', 'Mixed scopes, global memory', 'Mixed scopes,", "f = lambda e: L.is_shared(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e,", "<td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td>", "</td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\") # Scope and mem filters, including", "chunk = l[:nl] line = sep.join(chunk) s += line + ((nl - len(chunk))", "function def mux(f, args): inp = args.input l = list(listify(inp)) if hasattr(args, 'out'):", "L.is_shared(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s +=", "lc)) ks1 = ma.get_filtered_keys(f, logs, ks) ks1.sort() n = len(ks1) l = list()", "heavy', ['sb', 'rwc', 'iriw', 'r']), ('thin air', ['lb']) ] lfs = collections.OrderedDict(lfs) for", "sep + str(e.pos) + sep) # s1 + shared memory f = lambda", "p10.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default name)') p10.add_argument('-d', '--diro',", "list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue s += t", "# Write table to file f_out = out_base + '-' + suf +", "p7.set_defaults(func=partial(mux, latex3)) # Incantations p8 = sp.add_parser(cmds[7], description='Produce tables comparing the\\ effectiveness of", "name[:idx] s += ' 
<th>' + name + '</th>\\n' s += '</tr>\\n' #", "l.any_key(ks) ] s = '<table>\\n' # Process header s += '<tr>\\n' s +=", "'(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ]) ] return l # ------------------------------------------------------------------------------ ############ # Toplevel #", "= fs3[i3] f4 = fs4[i4] f = lambda e: f1(e) and f2(e) and", "Produce flat table with all tests def flat(args): pos = args.pos logs =", "== chip_old: break chip_old = chip assert(type(chip) == str) # Get incantation log", "str) log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0]", "16 # Line filters lfs = [ ('uniproc', ['corr', 'corw', 'cowr', 'coww']), ('observation',", "t for i in range(0, nc): i1 = (i & 0b1000) >> 3", "log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header # '&nbsp;': non-breaking space # '&#x2713;':", "'' h = HtmlFile() filters = get_section_filters() names = get_section_names() for f, name", "str(entry) s += '\\\\\\\\\\n' s += '\\\\end{tabular}\\n' # Write table to file f_out", "== str) assert(hasattr(args, 'diro')) chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip", "e: L.is_general_bc(e)] fs3 = [lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)] fs4 =", "# Filtering according to scopes and memory regions; no filtering according to #", "l in logs if l.any_key(ks) ] s = '<table>\\n' # Process header s", "axiom def get_axiom_patterns(): l = [ ('SC per location', ['CO', 'Co']), ('No Thin", "threads in different CTAs, global memory', 's2-global') ] # Column filter building blocks", "= s.replace('<chip>', chip, 1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for sec, tests", "ma from machinery import ErrMsg, chk, bail from machinery import LogEntry as L", "w_str(args.out, s) ### Produce latex tables def latex3(args): pos = args.pos logs =", "assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header prefix =", "cmds[4] + 
'.tex' p5.add_argument('-o', '--out', action='store', default=f) p5.set_defaults(func=partial(mux, latex)) # Latex 2 p6", "l += self.items + [self.suffix] self.s = ''.join(l) def write(self, fn): assert(self.s) f", "############ # f: function to be called; args: arguments to the function def", "l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl(f, logs, n) s += '\\n'", "& \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\ & \\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} &", "table) def produce_table(ks, logs, diro='entries'): logs = [ l for l in logs", "L from generic import lty, interleave, itemify, dupchk, listify, w_str # ------------------------------------------------------------------------------ #", "assert(False) self.last_level = level def close_nav(self, level): sp = self.sp while self.last_level >", "+ '.tex' w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_html_flat(args):", "ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) ks = set(ks) - all_matching ks = list(ks)", "s = '' h = HtmlFile() filters = get_section_filters() names = get_section_names() for", "n) w_str(args.out, s) # ------------------------------------------------------------------------------ ### Produce incantations tables # All tests that", "<td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td>", "log in logs: e = log.get(k) if e: s += e.pp_cell_link_dir(2, diro) #", "n) s += '\\\\bottomrule\\n' return s ### Produce latex tables def latex(args): pos", "+= ' & ' + str(entry) s += '\\\\\\\\\\n' s += '\\\\end{tabular}\\n' #", "listed under 'line filters' in this file # are ignored; non-existing tests and", 
"------------------------------------------------------------------------------ ####################### # Command line parser # ####################### # Open files and parse", "log[0] out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L)) # Table header", "'COWW']))) s = latex_tbl(f, logs, n) s += '\\n' # Produce d-warp:s-cta table,", "= ma.get_filtered_keys(f, logs) sep = ' & ' s = '' def mapper(k):", "'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc = ['CoWW', 'COWW']", "= list(listify(inp)) if hasattr(args, 'out'): l.append(args.out) chk(not dupchk(l), 'duplicate files given') # Read", "assert(type(log) == str) chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while", "# filter) fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 =", "= ''.join(l) def write(self, fn): assert(self.s) f = open(fn, 'w') f.write(self.s) f.close() #", "[] for name, val in l: ks_s = ma.get_matching_keys(val, logs) if pos: ks_s", "global memory', 's1-global'), (lambda e: L.is_warp(e) and L.is_shared(e), 'All threads in different warps,", "('observation', ['mp', 'isa2', 'wrc']), ('prop light', ['2+2w', 'w+rw+2w', 's']), ('prop heavy', ['sb', 'rwc',", "self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">' + link +\\", "there were insufficient resources on the chip to run the test. 
</center> <br>", "table description and filename suffix sfs = [ (lambda e: L.is_warp(e) and L.is_global(e),", "ignored def incantations(args): log = args.input assert(type(log) == str) # Get chip name", "f2(e) and f3(e) and f4(e) entry = '-' item = list(filter(f, l2)) if", "sep # s2 + global memory f = lambda e: L.is_global(e) and (L.is_cta(e)", "flat html tables comparing\\ the effectiveness of the incantations') p10.add_argument('input', action=InputAction, help='log (text", "parent.add_argument('-p', '--pos', action='store_true') # Subparsers sp = p.add_subparsers(help='use <subcommand> -h for further help',", "+= self.items + [self.suffix] self.s = ''.join(l) def write(self, fn): assert(self.s) f =", "s def latex_tbl(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep = ' &", "### Two level classification def two_level(args): pos = args.pos logs = args.input assert(lty(logs,", "p3.add_argument('-d', '--diro', action='store', default='entries') p3.set_defaults(func=partial(mux, sections)) # Two-level p4 = sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input',", "lambda e: L.is_shared(e) and (L.is_warp(e) or L.does_match(e, lc)) ks2 = ma.get_filtered_keys(f, logs, ks)", "default='entries') p4.set_defaults(func=partial(mux, two_level)) # Latex p5 = sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', action=InputAction) f =", "header s += '<tr>\\n' s += ' <th>Scope tree</th>\\n' s += ' <th>Memory", "test s += '<tr>\\n' s += '<td>' + t + '</td>' for i", "& \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:} &", "s = '' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower() + sep", "= sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction) f = cmds[1] + '.html' p2.add_argument('-o', '--out',", "e: s += e.pp_cell_link_dir(2, diro) # Produce file containing raw litmus log 
e.store_log_dir(diro)", "on line def fill_up(l, sep, end, nl): n = len(l) s = \"\"", "sp = self.sp while self.last_level > level: self.nav += sp * self.last_level +", "incantations) are also ignored def incantations(args): log = args.input assert(type(log) == str) #", "e: L.is_global(e) and (L.is_cta(e) or L.does_match(e, lc)) ks3 = ma.get_filtered_keys(f, logs, ks) ks3.sort()", "self.items + [self.suffix] self.s = ''.join(l) def write(self, fn): assert(self.s) f = open(fn,", "else: ks = ma.get_keys(logs) s = produce_table(ks, logs, diro=args.diro) h = HtmlFile() h.add_html(s)", "sp.add_parser(cmds[8], description='Produce flat tables comparing the\\ effectiveness of the incantations') p9.add_argument('input', action=InputAction, help='log", "# Subcommands # ############### ### Produce table with sections according to axioms def", "((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl(f, logs, n)", "= ma.get_entry(k, logs) s += le.pp_prefix(2) for log in logs: e = log.get(k)", "args.pos logs = args.input assert(type(logs) == ma.Log) sep = ' & ' l", "= cmds[1] + '.html' p2.add_argument('-o', '--out', action='store', default=f) p2.add_argument('-d', '--diro', action='store', default='entries') p2.set_defaults(func=partial(mux,", "l1, l2 = zip(*l) l = interleave(l1, l2, n) s = fill_up(l, sep,", "# Start new row s += '<tr>\\n' le = ma.get_entry(k, logs) s +=", "if pos: ks = ma.get_pos_keys(logs, ks) all_matching += ks if ks: h.new_section(name, 0)", "0b0010) >> 1 i4 = (i & 0b0001) f1 = fs1[i1] f2 =", "####################### # Command line parser # ####################### # Open files and parse or", "according to axioms def classified(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log))", "Incantations p8 = sp.add_parser(cmds[7], description='Produce tables comparing the\\ effectiveness of the incantations') p8.add_argument('input',", "listify, w_str # 
------------------------------------------------------------------------------ # Html file (including navigation and sections) class HtmlFile:", "S & R+S & none & R & S & R+S\\\\ \\hline \"\"\")", "sp.add_parser(cmds[9], description='Produce flat html tables comparing\\ the effectiveness of the incantations') p10.add_argument('input', action=InputAction,", "the command name out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L)) short_names", "fn): assert(self.s) f = open(fn, 'w') f.write(self.s) f.close() # ------------------------------------------------------------------------------ ### Used by", "file (including navigation and sections) class HtmlFile: \"\"\"Html file representing litmus test results\"\"\"", "!= -1: name = name[:idx] s += ' <th>' + name + '</th>\\n'", "k in ks: # Start new row s += '<tr>\\n' le = ma.get_entry(k,", "zip(filters, names): ks = ma.get_filtered_keys(f, logs) if pos: ks = ma.get_pos_keys(logs, ks) if", "When a dash appears instead of a result, it is either because optcheck", "f = cmds[8] p9.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default", "'Mixed scopes, mixed memory' ] return names # Get key patterns per axiom", "assert(lty(l2, L)) if (len(l2) == 0): continue # Name of test s +=", "def incantations(args): log = args.input assert(type(log) == str) # Get chip name chip", "in enumerate(ks3): e = ma.get_entry(k, logs) l[i] += str(e.pos) + '\\\\\\\\' s =", "e: L.is_shared(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s", "fs3 = [lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)] fs4 = [lambda e:", "entry = item.pos # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += ' &", "or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl2(f, logs, n) s += '\\n' #", "action='store', default=f, help='output file basename (instead of default name)') p10.add_argument('-d', '--diro', action='store', 
default='entries-inc')", "'two-level', 'latex', 'latex2', 'latex3', 'incantations', 'incantations-flat', 'incantations-html'] p = get_cmdline_parser(cmds) if cmd not", "before tables prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html> <head> <meta charset=\"UTF-8\"> <title>GPU Litmus", "+ (' &' * nc) + r'\\\\' + '\\n' for t in tests:", "'--out', action='store', default=f) p4.add_argument('-d', '--diro', action='store', default='entries') p4.set_defaults(func=partial(mux, two_level)) # Latex p5 =", "[self.nav] l += self.items + [self.suffix] self.s = ''.join(l) def write(self, fn): assert(self.s)", "l = str(level+2) s = '<h' + l + '><a id=\"id' + str(self.secn)", "corresponding number. The logfile contains the litmus test code, and the incantations used", "L.is_global), # Simple scopes, shared memory c(L.is_warp, L.is_shared), # Simple scopes, mixed memory", "# HTML prefix before tables prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html> <head> <meta", "'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] # Produce d-warp:s-cta table, global memory f", "different CTAs, global memory', 's2-global') ] # Column filters fs1 = [lambda e:", "__name__ == \"__main__\": if len(sys.argv) == 1: sys.argv += ['-h'] cmd = sys.argv[1]", "flat incantation tables def incantations_flat(args): log = args.input assert(type(log) == str) chip =", "two_level(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l =", "L.is_global), # Mixed scopes, shared memory c(L.is_mixed_scope, L.is_shared), # Mixed scopes, mixed memory", "produce_table(ks, logs, diro=args.diro) h.add_html(s) # Rest all_matching = set(all_matching) if pos: ks_s =", "sections according to scopes and memory regions def sections(args): pos = args.pos logs", "{ border-bottom: 2px solid black; } table { border-top: none; } </style> </head>", "ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] # Prefix of", 
"'.html' p3.add_argument('-o', '--out', action='store', default=f) p3.add_argument('-d', '--diro', action='store', default='entries') p3.set_defaults(func=partial(mux, sections)) # Two-level", "<= level <= 2) l = str(level+2) s = '<h' + l +", "p2.add_argument('-o', '--out', action='store', default=f) p2.add_argument('-d', '--diro', action='store', default='entries') p2.set_defaults(func=partial(mux, classified)) # Sections p3", "same CTA; mixed memory', 'Mixed scopes, global memory', 'Mixed scopes, shared memory', 'Mixed", "table with all tests def flat(args): pos = args.pos logs = args.input assert(lty(logs,", "+= t for i in range(0, nc): i1 = (i & 0b1000) >>", "l2)) if item: item = itemify(item) assert(type(item) == L) entry = item.pos #", "s = '<h' + l + '><a id=\"id' + str(self.secn) + '\">' +", "class InputAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) def get_cmdline_parser(cmds):", "logs, n) s += '\\n' # Produce d-cta:s-ker table, global memory f =", "n) s += '\\n' # Produce d-warp:s-cta table, shared memory f = lambda", "& S & R+S & none & R & S & R+S\\\\ \\hline", "of items # sep: separator # end: end of line # n: number", "& S & R+S\\\\ \\hline \"\"\") # Scope and mem filters, including table", "nc = 16 # Scope and mem filters, table description, filename suffix for", "test names to include in the table # logs: list of log objects", "h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) # Rest all_matching = set(all_matching)", "& none & R & S & R+S & none & R &", "def get_cmdline_parser(cmds): # Parent of all p = argparse.ArgumentParser() # Dummy parent for", "pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) # Get all", "# Toplevel # ############ # f: function to be called; args: arguments to", "ks2 = ma.get_filtered_keys(f, logs, ks) ks2.sort() assert(len(ks2) == n) for i, k in", "= cmds[3] + '.html' 
p4.add_argument('-o', '--out', action='store', default=f) p4.add_argument('-d', '--diro', action='store', default='entries') p4.set_defaults(func=partial(mux,", "chip_old = chip while True: chip = os.path.splitext(chip)[0] if chip == chip_old: break", "type=\"text/css\" media=\"screen\"/> <style> ul { padding-top: 10px; } li { padding-top: 5px; }", "f = cmds[9] p10.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of default", "<head> <meta charset=\"UTF-8\"> <title>Evaluating incantations</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style> ul {", "keys if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) s = produce_table(ks,", "p9 = sp.add_parser(cmds[8], description='Produce flat tables comparing the\\ effectiveness of the incantations') p9.add_argument('input',", "action='store', default='entries') p2.set_defaults(func=partial(mux, classified)) # Sections p3 = sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction)", "</div> </body> </html> \"\"\" # Write table to file f_out = out_base +", "parser # ####################### # Open files and parse or unpickle class InputAction(argparse.Action): def", "also contains the litmus test code. 
When a dash appears instead of a", "from functools import partial import machinery as ma from machinery import ErrMsg, chk,", "(including navigation and sections) class HtmlFile: \"\"\"Html file representing litmus test results\"\"\" sp", "['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]']", "list(ks) if ks: h.new_section('Other', 0) ks.sort() s = produce_table(ks, logs) h.add_html(s) h.finish() h.write(args.out)", "e: L.is_cta(e) and L.is_global(e), 'All threads in different CTAs, global memory', 's2-global') ]", "args.input assert(type(logs) == ma.Log) n = 4 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]',", "media=\"screen\"/> </head> <body> <div class=\"outer\"> <div class=\"inner\"> <h1>GPU Litmus Test Results</h1> <br> <center>", "same CTA; shared memory', 'Different warps, same CTA; mixed memory', 'Mixed scopes, global", "directory prefix and suffix name = os.path.basename(log.fn) idx = name.find('.') if idx !=", "Subparsers sp = p.add_subparsers(help='use <subcommand> -h for further help', title= 'subcommands') # Flat", "= cmds[5] + '.tex' p6.add_argument('-o', '--out', action='store', default=f) p6.set_defaults(func=partial(mux, latex2)) # Latex 3", "+ sep + str(e.pos) l = list(map(mapper, ks)) header = sep.join([\"Test\" + sep", "arguments to the function def mux(f, args): inp = args.input l = list(listify(inp))", "\"\\\\\\\\\\n\" header += '\\midrule\\n' s = header + fill_up(l, sep, '\\\\\\\\\\n', n) s", "global memory f = lambda e: L.is_global(e) and \\ ((L.is_warp(e) and L.does_match(e, l))", "Table header # '&nbsp;': non-breaking space # '&#x2713;': checkmark prefix = textwrap.dedent(r\"\"\" <!DOCTYPE", "lambda e: L.is_rand_threads(e)] nc = 16 # Line filters lfs = [ ('uniproc',", "p2.add_argument('-d', '--diro', action='store', default='entries') p2.set_defaults(func=partial(mux, classified)) # Sections p3 = sp.add_parser(cmds[2], parents=[parent]) 
p3.add_argument('input',", "= sp * (level + 1) ul = sp * (self.last_level + 1)", "logs, ks) ks2.sort() assert(len(ks2) == n) for i, k in enumerate(ks2): e =", "<style> ul { padding-top: 10px; } li { padding-top: 5px; } th, td", "latex_tbl2(f, logs, n) s += '\\n' # Produce d-warp:s-cta table, shared memory f", "L.is_general_bc(e)] fs3 = [lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)] fs4 = [lambda", "</td> </tr> </table> </center> <br> <table> <tr> <td> </td> <td>1</td> <td>2</td> <td>3</td> <td>4</td>", "w_str(f_out, s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_html_flat(args): log =", "(len(l2) == 0): continue s += t for i in range(0, nc): i1", "filter) fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 = [lambda", "table to file f_out = out_base + '-' + suf + '.tex' w_str(f_out,", "<td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\") # Scope", "style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\"> <chip> </td> </tr> <tr style=\"border:none\"> <td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config>", "tree</th>\\n' s += ' <th>Memory map</th>\\n' s += ' <th>Name</th>\\n' for log in", "'subcommands') # Flat p1 = sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction) f = cmds[0]", "setattr(namespace, self.dest, values) def get_cmdline_parser(cmds): # Parent of all p = argparse.ArgumentParser() #", "' + str(entry) s += '\\\\\\\\\\n' s += '\\\\hline\\n' s += '\\\\end{tabular}\\n' #", "import os import sys import collections import textwrap from functools import partial import", "e.pp_cell_link_dir(2, diro) # Produce file containing raw litmus log e.store_log_dir(diro) else: s +=", "warps, same CTA; shared memory', 'Different warps, same 
CTA; mixed memory', 'Mixed scopes,", "and sections) class HtmlFile: \"\"\"Html file representing litmus test results\"\"\" sp = '", "} th, td { text-align: right; padding: 5px; padding-right: 15px; padding-left: 15px; }", "[ 'Different warps, same CTA; global memory', 'Different CTAs, same kernel; global memory',", "= ma.get_matching_keys(val, logs) if pos: ks_s = ma.get_pos_keys(logs, ks_s) all_matching += ks_s if", "len(ks1) l = list() for i, k in enumerate(ks1): e = ma.get_entry(k, logs)", "== n) for i, k in enumerate(ks2): e = ma.get_entry(k, logs) l[i] +=", "containing raw litmus log item.store_log_dir(args.diro) else: # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s", "les = log.get_all() assert(lty(les, L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table", "str) chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while True: chip", "pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) s = produce_table(ks, logs, diro=args.diro)", "raw litmus log item.store_log_dir(args.diro) else: # ppi_incantations: mem_stress, general_bc, barrier, rand_threads s +=", "'ISA2']), ('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW'", "h.write(args.out) ### Two level classification def two_level(args): pos = args.pos logs = args.input", "# Prefix of output filename, default is the command name out_base = args.out", "the logfile for a test and chip, click on the corresponding number. 
The", "'duplicate files given') # Read ordinary logs (if we do not want to", "if pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 0) s = produce_table(ks,", "& \\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra", "'All threads in different warps, shared memory', 's1-shared'), (lambda e: L.is_cta(e) and L.is_global(e),", "not L.is_general_bc(e), lambda e: L.is_general_bc(e)] fs3 = [lambda e: not L.is_barrier(e), lambda e:", "self.last_level -= 1 def new_section(self, heading, level): assert(0 <= level <= 2) l", "l1)) assert(lty(l2, L)) if (len(l2) == 0): continue s += t for i", "in enumerate(ks2): e = ma.get_entry(k, logs) l[i] += str(e.pos) + sep # s2", "self.sp li = sp * (level + 1) ul = sp * (self.last_level", "f3(e) and f4(e) entry = '-' item = list(filter(f, l2)) if item: item", "'--diro', action='store', default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat)) return p if __name__ == \"__main__\": if len(sys.argv)", "list(map(mapper, ks)) l1, l2 = zip(*l) l = interleave(l1, l2, n) s =", "tr:nth-child(1), tr:nth-child(5) { border-bottom: 2px solid black; } table { border-top: none; }", "\\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\ & \\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} &", "e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 = [lambda e: not L.is_general_bc(e), lambda", "Column filter building blocks (need to be combined to yield a single column", "cmds[3] + '.html' p4.add_argument('-o', '--out', action='store', default=f) p4.add_argument('-d', '--diro', action='store', default='entries') p4.set_defaults(func=partial(mux, two_level))", "basename (instead of default name)') p8.set_defaults(func=partial(mux, incantations)) # Incantations flat p9 = sp.add_parser(cmds[8],", 
"('prop light', ['2+2w', 'w+rw+2w', 's']), ('prop heavy', ['sb', 'rwc', 'iriw', 'r']), ('thin air',", "ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += '<td>' + str(entry) + '</td>' s", "including table description and filename suffix sfs = [ (lambda e: L.is_warp(e) and", "<config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical", "+ 1) if level == self.last_level: self.nav += li + '<li><a href=\"#id' +", "ma.get_matching_keys(l, logs) # Names + s1 + global memory f = lambda e:", "1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for sec, tests in lfs.items(): tests.sort()", "+= ' <th>Memory map</th>\\n' s += ' <th>Name</th>\\n' for log in logs: #", "h = HtmlFile() all_matching = [] for name, val in l: ks =", "p5.add_argument('input', action=InputAction) f = cmds[4] + '.tex' p5.add_argument('-o', '--out', action='store', default=f) p5.set_defaults(func=partial(mux, latex))", "pickle)') f = cmds[7] p8.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead of", "f = cmds[2] + '.html' p3.add_argument('-o', '--out', action='store', default=f) p3.add_argument('-d', '--diro', action='store', default='entries')", "assert(type(chip) == str) chip_old = chip while True: chip = os.path.splitext(chip)[0] if chip", "a test and chip, click on the corresponding number. The logfile contains the", "s) # ------------------------------------------------------------------------------ ####################### # Command line parser # ####################### # Open files", "# are ignored; non-existing tests and non-existing entries (e.g. for a certain #", "in different warps, shared memory', 's1-shared'), (lambda e: L.is_cta(e) and L.is_global(e), 'All threads", "the test. 
</center> <br> <center> <table style=\"border:none\"> <tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\">", "assert(out_base) les = log.get_all() assert(lty(les, L)) # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85}", "logs: # Remove directory prefix and suffix name = os.path.basename(log.fn) idx = name.find('.')", "+= '</tr>\\n' s += \"\"\" </table> </div> </div> </body> </html> \"\"\" # Write", "not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Line filters lfs =", "logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) s = '' h = HtmlFile()", "str(e.pos) l = list(map(mapper, ks)) l1, l2 = zip(*l) l = interleave(l1, l2,", "== ma.Log) n = 8 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]',", "that each take a log entry d = [ # Simple scopes, global", "f = cmds[3] + '.html' p4.add_argument('-o', '--out', action='store', default=f) p4.add_argument('-d', '--diro', action='store', default='entries')", "= log[0] # Prefix of output filename, default is the command name out_base", "Produce table with sections according to scopes and memory regions def sections(args): pos", "l2)) if item: item = itemify(item) assert(type(item) == L) entry = item.pos s", "parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) def get_cmdline_parser(cmds): # Parent of all", "= ma.get_entry(k, logs) l[i] += str(e.pos) + sep # s2 + global memory", "(text or pickle)') f = cmds[9] p10.add_argument('-o', '--out', action='store', default=f, help='output file basename", "resources on the chip to run the test. 
</center> <br> <center> <table style=\"border:none\">", "p = argparse.ArgumentParser() # Dummy parent for common options parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p',", "values, option_string=None): setattr(namespace, self.dest, values) def get_cmdline_parser(cmds): # Parent of all p =", "' & ' s = '\\midrule\\n' def mapper(k): e = ma.get_entry(k, logs) return", "s += le.pp_prefix(2) for log in logs: e = log.get(k) if e: s", "= produce_table(ks, logs, diro=args.diro) h.add_html(s) # Rest all_matching = set(all_matching) if pos: ks_s", "a simple test name (like rwc) l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L))", "text-align: left; } tr:nth-child(1), tr:nth-child(5) { border-bottom: 2px solid black; } table {", "self.last_level: self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">' + link", "= ma.get_keys(logs) ks = set(ks) - all_matching ks = list(ks) if ks: h.new_section('Other',", "global memory', 'Mixed scopes, shared memory', 'Mixed scopes, mixed memory' ] return names", "ks = ma.get_filtered_keys(f, logs) if pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name,", "# s2 + global memory f = lambda e: L.is_global(e) and (L.is_cta(e) or", "# Get all tests that match a simple test name (like rwc) l2", "'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] # Produce d-warp:s-cta", "def get_section_names(): # Parallel the above functions names = [ 'Different warps, same", "############### ### Produce table with sections according to axioms def classified(args): pos =", "nc): i1 = (i & 0b1000) >> 3 i2 = (i & 0b0100)", "namespace, values, option_string=None): setattr(namespace, self.dest, values) def get_cmdline_parser(cmds): # Parent of all p", "mixed memory c(L.is_warp, L.is_mixed_mem), # Mixed scopes, global memory c(L.is_mixed_scope, L.is_global), # Mixed", "h.write(args.out) ### Produce flat table with all tests def flat(args): 
pos = args.pos", "'&nbsp;': non-breaking space # '&#x2713;': checkmark prefix = textwrap.dedent(r\"\"\" <!DOCTYPE html> <html style=\"background:white;\">", "logfile also contains the litmus test code. When a dash appears instead of", "all_matching = [] for name, val in l: ks_s = ma.get_matching_keys(val, logs) if", "c: inp = [inp] inp = ma.get_logs(inp, lh=ma.Log) if not c: inp =", "and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl2(f, logs, n) s", "'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] # Produce d-warp:s-cta table, global memory f = lambda", "from machinery import ErrMsg, chk, bail from machinery import LogEntry as L from", "w_str(args.out, s) # ------------------------------------------------------------------------------ ### Produce incantations tables # All tests that are", "an incantation log) if f != incantations and f != incantations_flat and f", "file representing litmus test results\"\"\" sp = ' ' # HTML prefix before", "li { padding-top: 5px; } th, td { text-align: right; padding: 5px; padding-right:", "<td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td>", "<tr> <td>memory&nbsp;stress</td> <td> </td><td> </td><td> </td><td> </td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td>", "import LogEntry as L from generic import lty, interleave, itemify, dupchk, listify, w_str", "suffix name = os.path.basename(log.fn) idx = name.find('.') if idx != -1: name =", "style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config> </td> </tr> </table> </center> <br> <table> <tr> <td> </td>", "p3 = sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction) f = cmds[2] + '.html' 
p3.add_argument('-o',", "logs if l.any_key(ks) ] s = '<table>\\n' # Process header s += '<tr>\\n'", "(L.is_warp(e) or L.does_match(e, lc)) ks1 = ma.get_filtered_keys(f, logs, ks) ks1.sort() n = len(ks1)", "+ fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s def latex_tbl2(f, logs,", "action='store', default=f, help='output file basename (instead of default name)') p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations", "(lambda e: L.is_cta(e) and L.is_global(e), 'All threads in different CTAs, global memory', 's2-global')", "filters lfs = [ ('uniproc', ['corr', 'corw', 'cowr', 'coww']), ('observation', ['mp', 'isa2', 'wrc']),", "'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc =", "'<td>' + str(entry) + '</td>' s += '</tr>\\n' s += \"\"\" </table> </div>", "if e: s += e.pp_cell_link_dir(2, diro) # Produce file containing raw litmus log", "Flat p1 = sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction) f = cmds[0] + '.html'", "chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while True: chip =", "'isa2', 'wrc']), ('prop light', ['2+2w', 'w+rw+2w', 's']), ('prop heavy', ['sb', 'rwc', 'iriw', 'r']),", "default='entries') p1.set_defaults(func=partial(mux, flat)) # Classified p2 = sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction) f", "test name (like rwc) l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2)", "result, it is either because optcheck failed or because there were insufficient resources", "<head> <meta charset=\"UTF-8\"> <title>GPU Litmus Test Results</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head>", "barrier, rand_threads s += '<td>' + str(entry) + '</td>' s += '</tr>\\n' s", "<td style=\"text-align:left\"> <chip> </td> </tr> <tr style=\"border:none\"> 
<td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config> </td>", "\\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\ & \\multicolumn{4}{h}{none}", "All tests that are not explicitely listed under 'line filters' in this file", "name chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while True: chip", "log e.store_log_dir(diro) else: s += '<td><a href=\"\">---</a></td>\\n' s += '</tr>\\n' s += '</table>\\n'", "= '-' item = list(filter(f, l2)) if item: item = itemify(item) assert(type(item) ==", "chip_old: break chip_old = chip assert(type(chip) == str) # Get incantation log log", "assert(len(log) == 1) log = log[0] out_base = args.out assert(out_base) les = log.get_all()", "action='store', default=f) p1.add_argument('-d', '--diro', action='store', default='entries') p1.set_defaults(func=partial(mux, flat)) # Classified p2 = sp.add_parser(cmds[1],", "of test s += '<tr>\\n' s += '<td>' + t + '</td>' for", "Incantations:} & \\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\ & none &", "# table) def produce_table(ks, logs, diro='entries'): logs = [ l for l in", "out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L)) # Table header prefix", "latex tables def latex(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log)", "ks) ks1.sort() n = len(ks1) l = list() for i, k in enumerate(ks1):", "l: ks_s = ma.get_matching_keys(val, logs) if pos: ks_s = ma.get_pos_keys(logs, ks_s) all_matching +=", "tr:nth-child(5) { border-bottom: 2px solid black; } table { border-top: none; } </style>", "Litmus Test Results</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head> <body> <div class=\"outer\"> <div", "+ global memory f = lambda e: L.is_global(e) 
and (L.is_warp(e) or L.does_match(e, lc))", "Start new row s += '<tr>\\n' le = ma.get_entry(k, logs) s += le.pp_prefix(2)", "log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] out_base", "get_section_filters(): def c(f, g): return lambda e: f(e) and g(e) # List of", "dupchk(l), 'duplicate files given') # Read ordinary logs (if we do not want", "'classified', 'sections', 'two-level', 'latex', 'latex2', 'latex3', 'incantations', 'incantations-flat', 'incantations-html'] p = get_cmdline_parser(cmds) if", "effectiveness of the incantations') p9.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[8]", "* n) + \"\\\\\\\\\\n\" header += '\\midrule\\n' s = header + fill_up(l, sep,", "d-warp:s-cta table, shared memory f = lambda e: L.is_shared(e) and \\ ((L.is_warp(e) and", "'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc = ['CoWW', 'COWW'] ks = ma.get_matching_keys(l, logs) # Names", "basename (instead of default name)') p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations html p10 = sp.add_parser(cmds[9],", "r r r g g g g r r r r} \\toprule \\multicolumn{17}{l}{Chip:", "none & R & S & R+S & none & R & S", "help='log (text or pickle)') f = cmds[9] p10.add_argument('-o', '--out', action='store', default=f, help='output file", "memory f = lambda e: L.is_global(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or", "cmds[1] + '.html' p2.add_argument('-o', '--out', action='store', default=f) p2.add_argument('-d', '--diro', action='store', default='entries') p2.set_defaults(func=partial(mux, classified))", "+= '<tr>\\n' s += '<td>' + t + '</td>' for i in range(0,", "k in enumerate(ks3): e = ma.get_entry(k, logs) l[i] += str(e.pos) + '\\\\\\\\' s", "* (self.last_level + 1) if level == self.last_level: self.nav += li + '<li><a", "self.last_level = level def close_nav(self, level): sp = self.sp while self.last_level > level:", "h.finish(nav=False) h.write(args.out) # 
------------------------------------------------------------------------------ ### Fill up table line by line # l:", "p.add_subparsers(help='use <subcommand> -h for further help', title= 'subcommands') # Flat p1 = sp.add_parser(cmds[0],", "after tables suffix = textwrap.dedent(\"\"\" </div> </div> </body> </html> \"\"\") def __init__(self): self.items", "L.is_rand_threads(e)] nc = 16 # Scope and mem filters, table description, filename suffix", "<tr> <td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td>", "= [ 'Different warps, same CTA; global memory', 'Different CTAs, same kernel; global", "padding-top: 10px; } li { padding-top: 5px; } th, td { text-align: right;", "# Name of test s += '<tr>\\n' s += '<td>' + t +", "Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\ & \\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ &", "Dummy parent for common options parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true') # Subparsers", "L)) # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g", "if chip == chip_old: break chip_old = chip assert(type(chip) == str) log =", "} td:nth-child(1) { text-align: left; } tr:nth-child(1), tr:nth-child(5) { border-bottom: 2px solid black;", "are ignored; non-existing tests and non-existing entries (e.g. 
for a certain # combination", "table, global memory f = lambda e: L.is_global(e) and \\ ((L.is_cta(e) and L.does_match(e,", "list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue # Name of", "} li { padding-top: 5px; } th, td { text-align: right; padding: 5px;", "r'{\\bf ' + sec + '}' + (' &' * nc) + r'\\\\'", "in sfs: s = prefix s = s.replace('<config>', cfg, 1) s = s.replace('<chip>',", "# Incantations html p10 = sp.add_parser(cmds[9], description='Produce flat html tables comparing\\ the effectiveness", "str(e.pos) + sep) # s1 + shared memory f = lambda e: L.is_shared(e)", "'latex2', 'latex3', 'incantations', 'incantations-flat', 'incantations-html'] p = get_cmdline_parser(cmds) if cmd not in cmds:", "collections.OrderedDict(lfs) for sf, cfg, suf in sfs: s = prefix s = s.replace('<config>',", "h.new_section(name, 0) # Now divide by other sections filters = get_section_filters() names =", "### Produce latex tables def latex(args): pos = args.pos logs = args.input assert(type(logs)", "+ '.tex' p6.add_argument('-o', '--out', action='store', default=f) p6.set_defaults(func=partial(mux, latex2)) # Latex 3 p7 =", "+= '\\midrule\\n' s = header + fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n'", "True: chip = os.path.splitext(chip)[0] if chip == chip_old: break chip_old = chip assert(type(chip)", "args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l = get_axiom_patterns() h = HtmlFile() all_matching =", "ma.Log) n = 8 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]',", "'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc", "tests and non-existing entries (e.g. 
for a certain # combination of incantations) are", "c(L.is_ker, L.is_global), # Simple scopes, shared memory c(L.is_warp, L.is_shared), # Simple scopes, mixed", "level: self.nav += sp * self.last_level + '</ul>\\n' self.last_level -= 1 def new_section(self,", "CTAs, same kernel; global memory', 'Different kernels, same device; global memory', 'Different warps,", "('prop heavy', ['sb', 'rwc', 'iriw', 'r']), ('thin air', ['lb']) ] lfs = collections.OrderedDict(lfs)", "href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n' else: assert(False) self.last_level =", "textwrap from functools import partial import machinery as ma from machinery import ErrMsg,", "that are not explicitely listed under 'line filters' in this file # are", "= [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Line", "ul = sp * (self.last_level + 1) if level == self.last_level: self.nav +=", "h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) all_matching = set(all_matching) if pos:", "' + str(entry) s += '\\\\\\\\\\n' s += '\\\\end{tabular}\\n' # Write table to", "li + '<li><a href=\"#id' + str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif", "text-align: right; padding: 5px; padding-right: 15px; padding-left: 15px; } td:nth-child(1) { text-align: left;", "a single column # filter) fs1 = [lambda e: not L.is_mem_stress(e), lambda e:", "if ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) # Rest all_matching", "l: ks = ma.get_matching_keys(val, logs) if pos: ks = ma.get_pos_keys(logs, ks) all_matching +=", "e.short_name.lower(), str(e.pos) l = list(map(mapper, ks)) l1, l2 = zip(*l) l = interleave(l1,", "n) for i, k in enumerate(ks2): e = ma.get_entry(k, logs) l[i] += str(e.pos)", "ks1 = ma.get_filtered_keys(f, logs, ks) ks1.sort() n = len(ks1) l = list() for", "shared memory f = lambda e: L.is_shared(e) and (L.is_warp(e) or L.does_match(e, lc)) ks2", "and (L.is_cta(e) or L.does_match(e, lc)) ks3 
= ma.get_filtered_keys(f, logs, ks) ks3.sort() assert(len(ks3) ==", "incantations_html_flat(args): log = args.input assert(type(log) == str) assert(hasattr(args, 'diro')) chip = os.path.basename(log) assert(type(chip)", "'2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] # Produce d-warp:s-cta table, global memory", "ma.get_entry(k, logs) l[i] += str(e.pos) + '\\\\\\\\' s = '\\n'.join(l) w_str(args.out, s) ###", "n) w_str(args.out, s) def latex2(args): pos = args.pos logs = args.input assert(type(logs) ==", "</center> <br><br> \"\"\") # HTML suffix after tables suffix = textwrap.dedent(\"\"\" </div> </div>", "logs = args.input assert(type(logs) == ma.Log) n = 4 l = ['CO', 'Co',", "a dash appears instead of a result, it is either because optcheck failed", "'</td>' s += '</tr>\\n' s += \"\"\" </table> </div> </div> </body> </html> \"\"\"", "Produce latex tables def latex3(args): pos = args.pos logs = args.input assert(type(logs) ==", "# Produce d-warp:s-cta table, shared memory f = lambda e: L.is_shared(e) and \\", "general_bc, barrier, rand_threads s += '<td>' + str(entry) + '</td>' s += '</tr>\\n'", "filters fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 = [lambda", "= item.pos s += item.pp_cell_link_dir(2, args.diro) # Produce file containing raw litmus log", "<th>Scope tree</th>\\n' s += ' <th>Memory map</th>\\n' s += ' <th>Name</th>\\n' for log", "f = cmds[1] + '.html' p2.add_argument('-o', '--out', action='store', default=f) p2.add_argument('-d', '--diro', action='store', default='entries')", "for sec, tests in lfs.items(): tests.sort() # Section header s += r'{\\bf '", "args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) # Get all the keys", "= ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s)", "== 0): continue s += t for i in range(0, nc): i1 =", "s += '\\\\bottomrule\\n' return s ### Produce latex 
tables def latex(args): pos =", "the corresponding number. The logfile contains the litmus test code, and the incantations", "fs4[i4] f = lambda e: f1(e) and f2(e) and f3(e) and f4(e) entry", "+= '<td>' + str(entry) + '</td>' s += '</tr>\\n' s += \"\"\" </table>", "g r r r r g g g g r r r r}", "= sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', action=InputAction) f = cmds[4] + '.tex' p5.add_argument('-o', '--out', action='store',", "and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl2(f,", "filters, including table description and filename suffix sfs = [ (lambda e: L.is_warp(e)", "self.last_level + 1: self.nav += ul + '<ul>\\n' self.nav += li + '<li><a", "f = lambda e: L.is_global(e) and (L.is_cta(e) or L.does_match(e, lc)) ks3 = ma.get_filtered_keys(f,", "[lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)] fs3 = [lambda e: not L.is_barrier(e),", "+= le.pp_prefix(2) for log in logs: e = log.get(k) if e: s +=", "HtmlFile() all_matching = [] for name, val in l: ks_s = ma.get_matching_keys(val, logs)", "<table style=\"border:none\"> <tr style=\"border:none\"> <td style=\"text-align:left\">Chip:</td> <td style=\"text-align:left\"> <chip> </td> </tr> <tr style=\"border:none\">", "# names def get_section_filters(): def c(f, g): return lambda e: f(e) and g(e)", "different warps, shared memory', 's1-shared'), (lambda e: L.is_cta(e) and L.is_global(e), 'All threads in", "header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g g g g r", "sections filters = get_section_filters() names = get_section_names() for f, name in zip(filters, names):", "pos = args.pos logs = args.input assert(type(logs) == ma.Log) n = 8 l", "+= '\\n' # Produce d-warp:s-cta table, shared memory f = lambda e: L.is_shared(e)", "g(e) # List of functions that each take a log entry d =", 
"L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16 # Scope and mem filters, table", "ma.setup_err_handling('log2tbl.py') cmds = ['flat', 'classified', 'sections', 'two-level', 'latex', 'latex2', 'latex3', 'incantations', 'incantations-flat', 'incantations-html']", "s += \"\"\" </table> </div> </div> </body> </html> \"\"\" # Write table to", "scopes, shared memory c(L.is_warp, L.is_shared), # Simple scopes, mixed memory c(L.is_warp, L.is_mixed_mem), #", "= 4 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$',", "args.input assert(type(log) == str) chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip", "parse or unpickle class InputAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest,", "<td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr>", "memory c(L.is_warp, L.is_shared), # Simple scopes, mixed memory c(L.is_warp, L.is_mixed_mem), # Mixed scopes,", "+= latex_tbl2(f, logs, n) s += '\\n' # Produce d-cta:s-ker table, global memory", "h.finish() h.write(args.out) ### Produce table with sections according to scopes and memory regions", "'<h' + l + '><a id=\"id' + str(self.secn) + '\">' + heading +", "continue # Name of test s += t for i in range(0, nc):", "'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc = ['CoWW',", "'Mixed scopes, shared memory', 'Mixed scopes, mixed memory' ] return names # Get", "heading + '</a></h'\\ + l + '>\\n' self.items.append(s) self.add_nav_item(heading, level) self.secn += 1", "default=f, help='output file basename (instead of default name)') p10.add_argument('-d', '--diro', action='store', default='entries-inc') p10.set_defaults(func=partial(mux,", 
"<chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: <config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical", "= ma.get_pos_keys(logs) else: ks_s = ma.get_keys(logs) ks_s = set(ks_s) - all_matching ks_s =", "f = lambda e: f1(e) and f2(e) and f3(e) and f4(e) entry =", "['flat', 'classified', 'sections', 'two-level', 'latex', 'latex2', 'latex3', 'incantations', 'incantations-flat', 'incantations-html'] p = get_cmdline_parser(cmds)", "R+S & none & R & S & R+S\\\\ \\hline \"\"\") # Scope", "# Produce file containing raw litmus log e.store_log_dir(diro) else: s += '<td><a href=\"\">---</a></td>\\n'", "</div> </div> </body> </html> \"\"\" # Write table to file f_out = out_base", "ks) if ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out)", "log[0] # Prefix of output filename, default is the command name out_base =", "<td>5</td> <td>6</td> <td>7</td> <td>8</td> <td>9</td> <td>10</td> <td>11</td> <td>12</td> <td>13</td> <td>14</td> <td>15</td> <td>16</td> </tr>", "= ' & ' l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]',", "= 16 # Line filters lfs = [ ('uniproc', ['corr', 'corw', 'cowr', 'coww']),", "Get all the keys if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs)", "lfs = [ ('uniproc', ['corr', 'corw', 'cowr', 'coww']), ('observation', ['mp', 'isa2', 'wrc']), ('prop", "logs: e = log.get(k) if e: s += e.pp_cell_link_dir(2, diro) # Produce file", "\\multicolumn{4}{c}{Critical Incantations:}\\\\ & \\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:}", "ks: h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce", "prefix before tables prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html> <head> <meta 
charset=\"UTF-8\"> <title>GPU", "file f_out = out_base + '-' + suf + '.tex' w_str(f_out, s) #", "= '\\midrule\\n' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower(), str(e.pos) l =", "Rest all_matching = set(all_matching) if pos: ks_s = ma.get_pos_keys(logs) else: ks_s = ma.get_keys(logs)", "logs) l[i] += str(e.pos) + '\\\\\\\\' s = '\\n'.join(l) w_str(args.out, s) ### Produce", "r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: <config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical", "</td> </tr> <tr style=\"border:none\"> <td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config> </td> </tr> </table> </center>", "Incantations flat p9 = sp.add_parser(cmds[8], description='Produce flat tables comparing the\\ effectiveness of the", "L) entry = item.pos s += item.pp_cell_link_dir(2, args.diro) # Produce file containing raw", "</html> \"\"\" # Write table to file f_out = out_base + '-' +", "names): ks = ma.get_filtered_keys(f, logs) if pos: ks = ma.get_pos_keys(logs, ks) if ks:", "+ str(self.secn) + '\">' + link +\\ '</a></li>\\n' elif level < self.last_level: self.close_nav(level)", "+= ul + '<ul>\\n' self.nav += li + '<li><a href=\"#id' + str(self.secn) +", "item.pp_cell_link_dir(2, args.diro) # Produce file containing raw litmus log item.store_log_dir(args.diro) else: # ppi_incantations:", "produce_table(ks, logs, diro=args.diro) h.add_html(s) all_matching = set(all_matching) if pos: ks = ma.get_pos_keys(logs) else:", "# Get all the keys if pos: ks = ma.get_pos_keys(logs) else: ks =", "logs = args.input assert(type(logs) == ma.Log) sep = ' & ' l =", "blocks (need to be combined to yield a single column # filter) fs1", "ks) all_matching += ks if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro)", "n = len(l) s = \"\" while l: chunk = l[:nl] line =", "global memory f = lambda e: L.is_global(e) and (L.is_warp(e) or 
L.does_match(e, lc)) ks1", "level): sp = self.sp li = sp * (level + 1) ul =", "== str) log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log =", "# Classified p2 = sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction) f = cmds[1] +", "elif level < self.last_level: self.close_nav(level) self.nav += li + '<li><a href=\"#id' + str(self.secn)", "Produce table with sections according to axioms def classified(args): pos = args.pos logs", "sfs: s = prefix s = s.replace('<config>', cfg, 1) s = s.replace('<chip>', chip,", "file # are ignored; non-existing tests and non-existing entries (e.g. for a certain", "action=InputAction) f = cmds[6] + '.tex' p7.add_argument('-o', '--out', action='store', default=f) p7.set_defaults(func=partial(mux, latex3)) #", "(text or pickle)') f = cmds[7] p8.add_argument('-o', '--out', action='store', default=f, help='output file basename", "s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue # Name of test", "new row s += '<tr>\\n' le = ma.get_entry(k, logs) s += le.pp_prefix(2) for", "assert(len(ks3) == n) for i, k in enumerate(ks3): e = ma.get_entry(k, logs) l[i]", "chip to run the test. 
</center> <br> <center> <table style=\"border:none\"> <tr style=\"border:none\"> <td", "\"__main__\": if len(sys.argv) == 1: sys.argv += ['-h'] cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds", "above functions names = [ 'Different warps, same CTA; global memory', 'Different CTAs,", "+= '\\\\bottomrule\\n' return s ### Produce latex tables def latex(args): pos = args.pos", "the\\ effectiveness of the incantations') p9.add_argument('input', action=InputAction, help='log (text or pickle)') f =", "= [lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)] fs4 = [lambda e: not", "ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce", "s1 + shared memory f = lambda e: L.is_shared(e) and (L.is_warp(e) or L.does_match(e,", "} tr:nth-child(1), tr:nth-child(5) { border-bottom: 2px solid black; } table { border-top: none;", "The logfile contains the litmus test code, and the incantations used for the", "+= '\\\\hline\\n' s += '\\\\end{tabular}\\n' # Write table to file f_out = out_base", "= log.get_all() assert(lty(les, L)) # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c}", "d-warp:s-cta table, global memory f = lambda e: L.is_global(e) and \\ ((L.is_warp(e) and", "test code. When a dash appears instead of a result, it is either", "ignored; non-existing tests and non-existing entries (e.g. 
for a certain # combination of", "l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl(f, logs, n) s += '\\n'", "ma.get_pos_keys(logs) else: ks_s = ma.get_keys(logs) ks_s = set(ks_s) - all_matching ks_s = list(ks_s)", "Produce latex tables def latex(args): pos = args.pos logs = args.input assert(type(logs) ==", "= list() for i, k in enumerate(ks1): e = ma.get_entry(k, logs) l.append(e.short_name.lower() +", "l = [ ('SC per location', ['CO', 'Co']), ('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation',", "the logfile for a test, click on the corresponding number. The logfile also", "L.is_barrier(e)] fs4 = [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)] nc = 16", "'\\\\hline\\n' s += '\\\\end{tabular}\\n' # Write table to file f_out = out_base +", "for log in logs: # Remove directory prefix and suffix name = os.path.basename(log.fn)", "'wrc']), ('prop light', ['2+2w', 'w+rw+2w', 's']), ('prop heavy', ['sb', 'rwc', 'iriw', 'r']), ('thin", "latex3)) # Incantations p8 = sp.add_parser(cmds[7], description='Produce tables comparing the\\ effectiveness of the", "warps, global memory', 's1-global'), (lambda e: L.is_warp(e) and L.is_shared(e), 'All threads in different", "[self.prefix] if nav: l += [self.nav] l += self.items + [self.suffix] self.s =", "args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) s = '' h =", "assert(type(chip) == str) # Get incantation log log = ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc))", "'s']), ('prop heavy', ['sb', 'rwc', 'iriw', 'r']), ('thin air', ['lb']) ] lfs =", "item = itemify(item) assert(type(item) == L) entry = item.pos s += item.pp_cell_link_dir(2, args.diro)", "comparing\\ the effectiveness of the incantations') p10.add_argument('input', action=InputAction, help='log (text or pickle)') f", "action='store', default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat)) return p if __name__ == \"__main__\": if len(sys.argv) ==", "<!DOCTYPE 
html> <html style=\"background:white;\"> <head> <meta charset=\"UTF-8\"> <title>Evaluating incantations</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\"", "add_html(self, html): self.items.append(html) def finish(self, nav=True): self.close_nav(-1) l = [self.prefix] if nav: l", "assert(len(ks2) == n) for i, k in enumerate(ks2): e = ma.get_entry(k, logs) l[i]", "f_out = out_base + '-' + suf + '.tex' w_str(f_out, s) # ------------------------------------------------------------------------------", "args.input assert(type(logs) == ma.Log) sep = ' & ' l = ['CO', 'Co',", "= args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) # Get all the keys if pos:", "padding-right: 15px; padding-left: 15px; } td:nth-child(1) { text-align: left; } tr:nth-child(1), tr:nth-child(5) {", "key are included in the # table) def produce_table(ks, logs, diro='entries'): logs =", "] lfs = collections.OrderedDict(lfs) for sf, cfg, suf in sfs: s = prefix", "code. When a dash appears instead of a result, it is either because", "s2 + global memory f = lambda e: L.is_global(e) and (L.is_cta(e) or L.does_match(e,", "according to # names def get_section_filters(): def c(f, g): return lambda e: f(e)", "'incantations-flat', 'incantations-html'] p = get_cmdline_parser(cmds) if cmd not in cmds: p.print_help() sys.exit(2) print('cmd:", "</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td>", "\\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: <config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}", "[self.suffix] self.s = ''.join(l) def write(self, fn): assert(self.s) f = open(fn, 'w') f.write(self.s)", "ppi_incantations: mem_stress, general_bc, barrier, rand_threads s += ' & ' + 
str(entry) s", "</td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr>", "pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) s = ''", "</div> </div> </body> </html> \"\"\") def __init__(self): self.items = [] self.nav = '<h4>Contents</h4>\\n'", "= '\\n'.join(l) w_str(args.out, s) ### Produce latex tables def latex3(args): pos = args.pos", "lfs = collections.OrderedDict(lfs) for sf, cfg, suf in sfs: s = prefix s", "lc = ['CoWW', 'COWW'] ks = ma.get_matching_keys(l, logs) # Names + s1 +", "line # l: list of items # sep: separator # end: end of", "sep) + end l = l[nl:] return s def latex_tbl(f, logs, n): ks", "diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce flat table with all tests def flat(args):", "ks = ma.get_keys(logs) ks = set(ks) - all_matching ks = list(ks) if ks:", "produce_table(ks, logs) h.add_html(s) h.finish() h.write(args.out) ### Two level classification def two_level(args): pos =", "# Incantations flat p9 = sp.add_parser(cmds[8], description='Produce flat tables comparing the\\ effectiveness of", "(text or pickle)') f = cmds[8] p9.add_argument('-o', '--out', action='store', default=f, help='output file basename", "l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0): continue #", "chip == chip_old: break chip_old = chip assert(type(chip) == str) # Get incantation", "test and chip, click on the corresponding number. 
The logfile contains the litmus", "assert(lty(l2, L)) if (len(l2) == 0): continue s += t for i in", "memory', 'Mixed scopes, global memory', 'Mixed scopes, shared memory', 'Mixed scopes, mixed memory'", "cmd not in cmds: p.print_help() sys.exit(2) print('cmd: ' + cmd) pr = p.parse_args()", "as ma from machinery import ErrMsg, chk, bail from machinery import LogEntry as", "* sep) + end l = l[nl:] return s def latex_tbl(f, logs, n):", "log objects (only logs which have the key are included in the #", "none & R & S & R+S\\\\ \\hline \"\"\") # Scope and mem", "'incantations-html'] p = get_cmdline_parser(cmds) if cmd not in cmds: p.print_help() sys.exit(2) print('cmd: '", "names # Get key patterns per axiom def get_axiom_patterns(): l = [ ('SC", "according to scopes and memory regions; no filtering according to # names def", "i in range(0, nc): i1 = (i & 0b1000) >> 3 i2 =", "Mixed scopes, global memory c(L.is_mixed_scope, L.is_global), # Mixed scopes, shared memory c(L.is_mixed_scope, L.is_shared),", "### Produce table with sections according to axioms def classified(args): pos = args.pos", "in l: ks = ma.get_matching_keys(val, logs) if pos: ks = ma.get_pos_keys(logs, ks) all_matching", "and L.is_shared(e), 'All threads in different warps, shared memory', 's1-shared'), (lambda e: L.is_cta(e)", "s += '\\\\\\\\\\n' s += '\\\\hline\\n' s += '\\\\end{tabular}\\n' # Write table to", "<td style=\"text-align:left\">Config:</td> <td style=\"text-align:left\"> <config> </td> </tr> </table> </center> <br> <table> <tr> <td>", "textwrap.dedent(\"\"\" </div> </div> </body> </html> \"\"\") def __init__(self): self.items = [] self.nav =", "if pos: ks_s = ma.get_pos_keys(logs) else: ks_s = ma.get_keys(logs) ks_s = set(ks_s) -", "ma.get_entry(k, logs) return e.short_name.lower(), str(e.pos) l = list(map(mapper, ks)) l1, l2 = zip(*l)", "def fill_up(l, sep, end, nl): n = len(l) s = \"\" while l:", "none; } </style> </head> <body> <div class=\"outer\" style=\"width: 100%;\"> <div 
class=\"inner\"> <h1>Evaluating incantations</h1>", "= HtmlFile() filters = get_section_filters() names = get_section_names() for f, name in zip(filters,", "Two level classification def two_level(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log))", "# ------------------------------------------------------------------------------ ### Produce incantations tables # All tests that are not explicitely", "CTAs, global memory', 's2-global') ] # Column filter building blocks (need to be", "line by line # l: list of items # sep: separator # end:", "latex_tbl(f, logs, n) s += '\\n' # Produce d-warp:s-cta table, shared memory f", "in zip(filters, names): ks = ma.get_filtered_keys(f, logs) if pos: ks = ma.get_pos_keys(logs, ks)", "parents=[parent]) p7.add_argument('input', action=InputAction) f = cmds[6] + '.tex' p7.add_argument('-o', '--out', action='store', default=f) p7.set_defaults(func=partial(mux,", "default is the command name out_base = args.out assert(out_base) les = log.get_all() assert(lty(les,", "table description, filename suffix for sf, cfg, suf in sfs: s = prefix", "this file # are ignored; non-existing tests and non-existing entries (e.g. for a", "ks = ma.get_matching_keys(l, logs) # Names + s1 + global memory f =", "of incantations) are also ignored def incantations(args): log = args.input assert(type(log) == str)", "raw litmus log e.store_log_dir(diro) else: s += '<td><a href=\"\">---</a></td>\\n' s += '</tr>\\n' s", "non-existing entries (e.g. 
for a certain # combination of incantations) are also ignored", "\"\"\") # Scope and mem filters, including table description and filename suffix sfs", "for name, val in l: ks = ma.get_matching_keys(val, logs) if pos: ks =", "c(L.is_cta, L.is_global), c(L.is_ker, L.is_global), # Simple scopes, shared memory c(L.is_warp, L.is_shared), # Simple", "t in short_names: l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) ==", "L.is_global(e) and (L.is_warp(e) or L.does_match(e, lc)) ks1 = ma.get_filtered_keys(f, logs, ks) ks1.sort() n", "[] for name, val in l: ks = ma.get_matching_keys(val, logs) if pos: ks", "because optcheck failed or because there were insufficient resources on the chip to", "default=f, help='output file basename (instead of default name)') p8.set_defaults(func=partial(mux, incantations)) # Incantations flat", "\\multicolumn{17}{l}{GPU Configuration: <config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:} & \\multicolumn{4}{h}{Critical Incantations:}", "# Table header # '&nbsp;': non-breaking space # '&#x2713;': checkmark prefix = textwrap.dedent(r\"\"\"", "= HtmlFile() all_matching = [] for name, val in l: ks_s = ma.get_matching_keys(val,", "& R+S & none & R & S & R+S & none &", "memory', 's1-global'), (lambda e: L.is_warp(e) and L.is_shared(e), 'All threads in different warps, shared", "'-' + suf + '.html' w_str(f_out, s) # ------------------------------------------------------------------------------ ####################### # Command line", "== ma.Log) n = 4 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]',", "f(args) ############### # Subcommands # ############### ### Produce table with sections according to", "== self.last_level: self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">' +", "to scopes and memory regions; no filtering according to # names def get_section_filters():", "incantation tables def incantations_flat(args): log = 
args.input assert(type(log) == str) chip = os.path.basename(log)", "# Column filters fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2", "'incantations', 'incantations-flat', 'incantations-html'] p = get_cmdline_parser(cmds) if cmd not in cmds: p.print_help() sys.exit(2)", "Line filters lfs = [ ('uniproc', ['corr', 'corw', 'cowr', 'coww']), ('observation', ['mp', 'isa2',", "s = '<table>\\n' # Process header s += '<tr>\\n' s += ' <th>Scope", "kernels, same device; global memory', 'Different warps, same CTA; shared memory', 'Different warps,", "d = [ # Simple scopes, global memory c(L.is_warp, L.is_global), c(L.is_cta, L.is_global), c(L.is_ker,", "== self.last_level + 1: self.nav += ul + '<ul>\\n' self.nav += li +", "L.is_global(e) and (L.is_cta(e) or L.does_match(e, lc)) ks3 = ma.get_filtered_keys(f, logs, ks) ks3.sort() assert(len(ks3)", "(L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl2(f, logs, n) s += '\\n' # Produce", "+= line + ((nl - len(chunk)) * sep) + end l = l[nl:]", "(L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl(f, logs, n) s += '\\n' # Produce", "html> <html> <head> <meta charset=\"UTF-8\"> <title>GPU Litmus Test Results</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\"", "self.last_level = -1 def add_nav_item(self, link, level): sp = self.sp li = sp", "def latex_tbl2(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep = ' & '", "link +\\ '</a></li>\\n' elif level == self.last_level + 1: self.nav += ul +", "e: L.is_global(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s", "(i & 0b1000) >> 3 i2 = (i & 0b0100) >> 2 i3", "['mp', 'isa2', 'wrc']), ('prop light', ['2+2w', 'w+rw+2w', 's']), ('prop heavy', ['sb', 'rwc', 'iriw',", "<div class=\"outer\"> <div class=\"inner\"> <h1>GPU Litmus Test Results</h1> <br> <center> To view the", "file basename (instead of default name)') p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations html p10 =", "# Open 
files and parse or unpickle class InputAction(argparse.Action): def __call__(self, parser, namespace,", "and mem filters, table description, filename suffix for sf, cfg, suf in sfs:", "t in tests: # Get all tests that match a simple test name", "if idx != -1: name = name[:idx] s += ' <th>' + name", "# Mixed scopes, global memory c(L.is_mixed_scope, L.is_global), # Mixed scopes, shared memory c(L.is_mixed_scope,", "args.diro) # Produce file containing raw litmus log item.store_log_dir(args.diro) else: # ppi_incantations: mem_stress,", "p2.set_defaults(func=partial(mux, classified)) # Sections p3 = sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+', action=InputAction) f =", "= sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds = ['flat', 'classified', 'sections', 'two-level', 'latex', 'latex2', 'latex3', 'incantations',", "ma.get_entry(k, logs) l[i] += str(e.pos) + sep # s2 + global memory f", "s += '<td>' + t + '</td>' for i in range(0, nc): i1", "+= '<td>' + t + '</td>' for i in range(0, nc): i1 =", "f = cmds[6] + '.tex' p7.add_argument('-o', '--out', action='store', default=f) p7.set_defaults(func=partial(mux, latex3)) # Incantations", "# Latex p5 = sp.add_parser(cmds[4], parents=[parent]) p5.add_argument('input', action=InputAction) f = cmds[4] + '.tex'", "= type(inp) is list if not c: inp = [inp] inp = ma.get_logs(inp,", "& ' + str(entry) s += '\\\\\\\\\\n' s += '\\\\hline\\n' s += '\\\\end{tabular}\\n'", "href=\"\">---</a></td>\\n' s += '</tr>\\n' s += '</table>\\n' return s # Filtering according to", "charset=\"UTF-8\"> <title>Evaluating incantations</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> <style> ul { padding-top: 10px;", "+= ' <th>Scope tree</th>\\n' s += ' <th>Memory map</th>\\n' s += ' <th>Name</th>\\n'", "'</tr>\\n' # Process rows for k in ks: # Start new row s", "be called; args: arguments to the function def mux(f, args): inp = args.input", "__call__(self, parser, namespace, 
values, option_string=None): setattr(namespace, self.dest, values) def get_cmdline_parser(cmds): # Parent of", "the chip to run the test. </center> <br> <center> <table style=\"border:none\"> <tr style=\"border:none\">", "line = sep.join(chunk) s += line + ((nl - len(chunk)) * sep) +", "is the command name out_base = args.out assert(out_base) les = log.get_all() assert(lty(les, L))", "+ sep # s2 + global memory f = lambda e: L.is_global(e) and", "['lb']) ] lfs = collections.OrderedDict(lfs) for sf, cfg, suf in sfs: s =", "chip == chip_old: break chip_old = chip assert(type(chip) == str) log = ma.get_logs(log,", "L)) if (len(l2) == 0): continue # Name of test s += t", "= args.pos logs = args.input assert(type(logs) == ma.Log) sep = ' & '", "l2 = zip(*l) l = interleave(l1, l2, n) s = fill_up(l, sep, '\\\\\\\\\\n',", "memory', 's2-global') ] # Column filters fs1 = [lambda e: not L.is_mem_stress(e), lambda", "or L.does_match(e, lc)) ks2 = ma.get_filtered_keys(f, logs, ks) ks2.sort() assert(len(ks2) == n) for", "match a simple test name (like rwc) l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2,", "for log in logs: e = log.get(k) if e: s += e.pp_cell_link_dir(2, diro)", "l: chunk = l[:nl] line = sep.join(chunk) s += line + ((nl -", "(if we do not want to read an incantation log) if f !=", "ordinary logs (if we do not want to read an incantation log) if", "logs) sep = ' & ' s = '\\midrule\\n' def mapper(k): e =", "name (like rwc) l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) ==", "ks3.sort() assert(len(ks3) == n) for i, k in enumerate(ks3): e = ma.get_entry(k, logs)", "further help', title= 'subcommands') # Flat p1 = sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction)", "+ 1) ul = sp * (self.last_level + 1) if level == self.last_level:", "h.new_section(name, 1) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ### Produce table", 
"assert(type(item) == L) entry = item.pos s += item.pp_cell_link_dir(2, args.diro) # Produce file", "help='output file basename (instead of default name)') p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations html p10", "os import sys import collections import textwrap from functools import partial import machinery", "# Column filter building blocks (need to be combined to yield a single", "\\multicolumn{4}{c}{Extra Incantations:}\\\\ & none & R & S & R+S & none &", "0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) all_matching = set(all_matching) if pos: ks", "+ s1 + global memory f = lambda e: L.is_global(e) and (L.is_warp(e) or", "incantations') p10.add_argument('input', action=InputAction, help='log (text or pickle)') f = cmds[9] p10.add_argument('-o', '--out', action='store',", "+ global memory f = lambda e: L.is_global(e) and (L.is_cta(e) or L.does_match(e, lc))", "# ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_html_flat(args): log = args.input assert(type(log)", "Litmus Test Results</h1> <br> <center> To view the logfile for a test and", "L.is_cta(e) and L.is_global(e), 'All threads in different CTAs, global memory', 's2-global') ] #", "new_section(self, heading, level): assert(0 <= level <= 2) l = str(level+2) s =", "ks_s = set(ks_s) - all_matching ks_s = list(ks_s) if ks_s: h.new_section('Other', 0) ks_s.sort()", "i, k in enumerate(ks1): e = ma.get_entry(k, logs) l.append(e.short_name.lower() + sep + str(e.pos)", "= log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header # '&nbsp;': non-breaking space #", "'r']), ('thin air', ['lb']) ] lfs = collections.OrderedDict(lfs) for sf, cfg, suf in", "# Latex 2 p6 = sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', action=InputAction) f = cmds[5] +", "n = 4 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]',", "args.input 
assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) s = '' h = HtmlFile() filters =", "if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s) h.finish() h.write(args.out) ###", "</td><td> </td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>thread&nbsp;randomisation</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> <td> </td><td>&#x2713;</td><td>", "log.get(k) if e: s += e.pp_cell_link_dir(2, diro) # Produce file containing raw litmus", "all_matching ks_s = list(ks_s) if ks_s: h.new_section('Other', 0) ks_s.sort() filters = get_section_filters() names", "and \\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl2(f, logs, n) w_str(args.out, s)", "simple test name (like rwc) l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if", "or unpickle class InputAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values)", "= argparse.ArgumentParser() # Dummy parent for common options parent = argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos',", "- len(chunk)) * sep) + end l = l[nl:] return s def latex_tbl(f,", "p3.add_argument('input', nargs='+', action=InputAction) f = cmds[2] + '.html' p3.add_argument('-o', '--out', action='store', default=f) p3.add_argument('-d',", "(instead of default name)') p10.add_argument('-d', '--diro', action='store', default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat)) return p if", "(lambda e: L.is_warp(e) and L.is_global(e), 'All threads in different warps, global memory', 's1-global'),", "Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ]) ]", "ma.LogInc)) assert(len(log) == 1) log = log[0] out_base = args.out assert(out_base) les =", "'--diro', action='store', default='entries') p2.set_defaults(func=partial(mux, 
classified)) # Sections p3 = sp.add_parser(cmds[2], parents=[parent]) p3.add_argument('input', nargs='+',", "e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)] fs3 = [lambda e: not L.is_barrier(e), lambda", "'--out', action='store', default=f, help='output file basename (instead of default name)') p9.set_defaults(func=partial(mux, incantations_flat)) #", "href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head> <body> <div class=\"outer\"> <div class=\"inner\"> <h1>GPU Litmus Test Results</h1>", "= sp * (self.last_level + 1) if level == self.last_level: self.nav += li", "'</a></li>\\n' elif level == self.last_level + 1: self.nav += ul + '<ul>\\n' self.nav", "1: sys.argv += ['-h'] cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds = ['flat', 'classified', 'sections',", "& \\multicolumn{4}{c}{Extra Incantations:}\\\\ & none & R & S & R+S & none", "by line # l: list of items # sep: separator # end: end", "tests def flat(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro'))", "file containing raw litmus log item.store_log_dir(args.diro) else: # ppi_incantations: mem_stress, general_bc, barrier, rand_threads", "['CoWW', 'COWW']))) s = latex_tbl2(f, logs, n) s += '\\n' # Produce d-warp:s-cta", "sp * (self.last_level + 1) if level == self.last_level: self.nav += li +", "\\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:}\\\\ & \\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\", "h.finish() h.write(args.out) ### Produce flat table with all tests def flat(args): pos =", "Produce d-warp:s-cta table, global memory f = lambda e: L.is_global(e) and \\ ((L.is_warp(e)", "= ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) s = produce_table(ks, logs, diro=args.diro) h =", "in range(0, nc): i1 = (i & 0b1000) >> 3 i2 = (i", "return e.short_name.lower(), str(e.pos) l = list(map(mapper, ks)) l1, l2 = zip(*l) l =", 
"str(e.pos) + sep # s2 + global memory f = lambda e: L.is_global(e)", "'line filters' in this file # are ignored; non-existing tests and non-existing entries", "ma.get_keys(logs) ks = set(ks) - all_matching ks = list(ks) if ks: h.new_section('Other', 0)", "= ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]',", "cmd = sys.argv[1] ma.setup_err_handling('log2tbl.py') cmds = ['flat', 'classified', 'sections', 'two-level', 'latex', 'latex2', 'latex3',", "ul + '<ul>\\n' self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">'", "'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]'] lc = ['CoWW', 'COWW'] ks = ma.get_matching_keys(l, logs) #", "fill_up(l, sep, end, nl): n = len(l) s = \"\" while l: chunk", "self.last_level > level: self.nav += sp * self.last_level + '</ul>\\n' self.last_level -= 1", "+ '</th>\\n' s += '</tr>\\n' # Process rows for k in ks: #", "if level == self.last_level: self.nav += li + '<li><a href=\"#id' + str(self.secn) +", "2 p6 = sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', action=InputAction) f = cmds[5] + '.tex' p6.add_argument('-o',", "L.is_global), c(L.is_cta, L.is_global), c(L.is_ker, L.is_global), # Simple scopes, shared memory c(L.is_warp, L.is_shared), #", "all_matching = set(all_matching) if pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) ks", "used for the test run. </center> <br><br> \"\"\") # HTML suffix after tables", "logfile for a test and chip, click on the corresponding number. 
The logfile", "sp = p.add_subparsers(help='use <subcommand> -h for further help', title= 'subcommands') # Flat p1", "# Name of test s += t for i in range(0, nc): i1", "in the table # logs: list of log objects (only logs which have", "[ ('SC per location', ['CO', 'Co']), ('No Thin Air', ['(LB$)|(LB\\+)|(LB\\-)']), ('Observation', ['(MP$)|(MP\\+)|(MP\\-)', 'WRC',", "assert(type(log) == str) assert(hasattr(args, 'diro')) chip = os.path.basename(log) assert(type(chip) == str) chip_old =", "Filtering according to scopes and memory regions; no filtering according to # names", "tables def latex3(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log) n", "if (len(l2) == 0): continue s += t for i in range(0, nc):", "# sep: separator # end: end of line # n: number of elements", "&' * nc) + r'\\\\' + '\\n' for t in tests: # Get", "default=f) p6.set_defaults(func=partial(mux, latex2)) # Latex 3 p7 = sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction) f", "chip while True: chip = os.path.splitext(chip)[0] if chip == chip_old: break chip_old =", "# Section header s += r'{\\bf ' + sec + '}' + ('", "level < self.last_level: self.close_nav(level) self.nav += li + '<li><a href=\"#id' + str(self.secn) +", "type(inp) is list if not c: inp = [inp] inp = ma.get_logs(inp, lh=ma.Log)", "table { border-top: none; } </style> </head> <body> <div class=\"outer\" style=\"width: 100%;\"> <div", "= args.input l = list(listify(inp)) if hasattr(args, 'out'): l.append(args.out) chk(not dupchk(l), 'duplicate files", "suffix = textwrap.dedent(\"\"\" </div> </div> </body> </html> \"\"\") def __init__(self): self.items = []", "S & R+S\\\\ \\hline \"\"\") # Scope and mem filters, including table description", "files given') # Read ordinary logs (if we do not want to read", "ks = ma.get_filtered_keys(f, logs) sep = ' & ' s = '\\midrule\\n' def", "g r r r r} \\toprule \\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: 
<config>}\\\\ \\hline &", "n: number of elements on line def fill_up(l, sep, end, nl): n =", "k in enumerate(ks2): e = ma.get_entry(k, logs) l[i] += str(e.pos) + sep #", "def sections(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) s", "+ '</td>' s += '</tr>\\n' s += \"\"\" </table> </div> </div> </body> </html>", "= '' h = HtmlFile() filters = get_section_filters() names = get_section_names() for f,", "'--pos', action='store_true') # Subparsers sp = p.add_subparsers(help='use <subcommand> -h for further help', title=", "'Different warps, same CTA; mixed memory', 'Mixed scopes, global memory', 'Mixed scopes, shared", "import machinery as ma from machinery import ErrMsg, chk, bail from machinery import", "- all_matching ks = list(ks) if ks: h.new_section('Other', 0) ks.sort() s = produce_table(ks,", "separator # end: end of line # n: number of elements on line", "+ '</td>' for i in range(0, nc): i1 = (i & 0b1000) >>", "= interleave(l1, l2, n) s = fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n'", "ma.Log) sep = ' & ' l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]',", "html tables comparing\\ the effectiveness of the incantations') p10.add_argument('input', action=InputAction, help='log (text or", "nav: l += [self.nav] l += self.items + [self.suffix] self.s = ''.join(l) def", "latex_tbl2(f, logs, n): ks = ma.get_filtered_keys(f, logs) sep = ' & ' s", "pos: ks = ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) ks = set(ks) - all_matching", "get_section_filters() names = get_section_names() for f, name in zip(filters, names): ks = ma.get_filtered_keys(f,", "click on the corresponding number. 
The logfile contains the litmus test code, and", "{ text-align: right; padding: 5px; padding-right: 15px; padding-left: 15px; } td:nth-child(1) { text-align:", "& R & S & R+S\\\\ \\hline \"\"\") # Scope and mem filters,", "< self.last_level: self.close_nav(level) self.nav += li + '<li><a href=\"#id' + str(self.secn) + '\">'", "logs) if pos: ks_s = ma.get_pos_keys(logs, ks_s) all_matching += ks_s if ks_s: h.new_section(name,", "L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s = latex_tbl2(f, logs, n) s +=", "['(MP$)|(MP\\+)|(MP\\-)', 'WRC', 'ISA2']), ('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)',", "s += '\\\\end{tabular}\\n' # Write table to file f_out = out_base + '-'", "L.does_match(e, l))) s += latex_tbl(f, logs, n) w_str(args.out, s) def latex2(args): pos =", "------------------------------------------------------------------------------ ### Used by all HTML file producers # ks: list of test", "assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] out_base = args.out assert(out_base) les", "name in zip(filters, names): ks = ma.get_filtered_keys(f, logs) if pos: ks = ma.get_pos_keys(logs,", "inp = [inp] inp = ma.get_logs(inp, lh=ma.Log) if not c: inp = inp[0]", "Latex 3 p7 = sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction) f = cmds[6] + '.tex'", "sep + str(e.pos) l = list(map(mapper, ks)) header = sep.join([\"Test\" + sep +", "= name[:idx] s += ' <th>' + name + '</th>\\n' s += '</tr>\\n'", "default name)') p8.set_defaults(func=partial(mux, incantations)) # Incantations flat p9 = sp.add_parser(cmds[8], description='Produce flat tables", "& \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\ & none & R & S &", "les = log.get_all() assert(lty(les, L)) # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r}", "Incantations:}\\\\ & 
\\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:} &", "get_section_names() for f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs) if pos:", "# Mixed scopes, mixed memory c(L.is_mixed_scope, L.is_mixed_mem) ] return d def get_section_names(): #", "chk(not dupchk(l), 'duplicate files given') # Read ordinary logs (if we do not", "('Propagation Light', ['2\\+2W', 'W\\+RW\\+2W', '(S$)|(S\\+)|(S\\-)']), ('Propagation Heavy', [ 'SB', '(R$)|(R\\+)|(R\\-)', 'RWC', 'IRIW' ])", "p10 = sp.add_parser(cmds[9], description='Produce flat html tables comparing\\ the effectiveness of the incantations')", "'IRIW' ]) ] return l # ------------------------------------------------------------------------------ ############ # Toplevel # ############ #", "enumerate(ks2): e = ma.get_entry(k, logs) l[i] += str(e.pos) + sep # s2 +", "Classified p2 = sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction) f = cmds[1] + '.html'", "= [] self.nav = '<h4>Contents</h4>\\n' self.secn = 0 self.last_level = -1 def add_nav_item(self,", "l2, n) s = fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s", "diro=args.diro) h.add_html(s) # Rest all_matching = set(all_matching) if pos: ks_s = ma.get_pos_keys(logs) else:", "self.secn = 0 self.last_level = -1 def add_nav_item(self, link, level): sp = self.sp", "-1: name = name[:idx] s += ' <th>' + name + '</th>\\n' s", "html> <html style=\"background:white;\"> <head> <meta charset=\"UTF-8\"> <title>Evaluating incantations</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/>", "' # HTML prefix before tables prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE html> <html> <head>", "############### # Subcommands # ############### ### Produce table with sections according to axioms", "logs, diro='entries'): logs = [ l for l in logs if l.any_key(ks) ]", "= 
ma.get_filtered_keys(f, logs, ks) ks2.sort() assert(len(ks2) == n) for i, k in enumerate(ks2):", "== 0): continue # Name of test s += '<tr>\\n' s += '<td>'", "'\\\\end{tabular}\\n' # Write table to file f_out = out_base + '-' + suf", "L)) if (len(l2) == 0): continue # Name of test s += '<tr>\\n'", "val in l: ks = ma.get_matching_keys(val, logs) if pos: ks = ma.get_pos_keys(logs, ks)", "html): self.items.append(html) def finish(self, nav=True): self.close_nav(-1) l = [self.prefix] if nav: l +=", "l += [self.nav] l += self.items + [self.suffix] self.s = ''.join(l) def write(self,", "</body> </html> \"\"\") def __init__(self): self.items = [] self.nav = '<h4>Contents</h4>\\n' self.secn =", "Produce flat incantation tables def incantations_html_flat(args): log = args.input assert(type(log) == str) assert(hasattr(args,", "= sp.add_parser(cmds[5], parents=[parent]) p6.add_argument('input', action=InputAction) f = cmds[5] + '.tex' p6.add_argument('-o', '--out', action='store',", "'--out', action='store', default=f, help='output file basename (instead of default name)') p10.add_argument('-d', '--diro', action='store',", "prefix s = s.replace('<config>', cfg, 1) s = s.replace('<chip>', chip, 1) l1 =", "line # n: number of elements on line def fill_up(l, sep, end, nl):", "+ sec + '}' + (' &' * nc) + r'\\\\' + '\\n'", "e: L.is_warp(e) and L.is_shared(e), 'All threads in different warps, shared memory', 's1-shared'), (lambda", "air', ['lb']) ] lfs = collections.OrderedDict(lfs) for sf, cfg, suf in sfs: s", "<td> </td><td>&#x2713;</td><td> </td><td>&#x2713;</td> </tr> \"\"\") # Scope and mem filters, including table description", "= '<table>\\n' # Process header s += '<tr>\\n' s += ' <th>Scope tree</th>\\n'", "L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header # '&nbsp;': non-breaking", "logfile for a test, click on the corresponding number. 
The logfile also contains", "L.is_mem_stress(e)] fs2 = [lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)] fs3 = [lambda", "sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input', nargs='+', action=InputAction) f = cmds[1] + '.html' p2.add_argument('-o', '--out', action='store',", "device; global memory', 'Different warps, same CTA; shared memory', 'Different warps, same CTA;", "l = interleave(l1, l2, n) s = fill_up(l, sep, '\\\\\\\\\\n', n) s +=", "class=\"inner\"> <h1>GPU Litmus Test Results</h1> <br> <center> To view the logfile for a", "lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] # Prefix of output", "p1 = sp.add_parser(cmds[0], parents=[parent]) p1.add_argument('input', nargs='+', action=InputAction) f = cmds[0] + '.html' p1.add_argument('-o',", "litmus test code. When a dash appears instead of a result, it is", "idx != -1: name = name[:idx] s += ' <th>' + name +", "Test Results</title> <link rel=\"stylesheet\" href=\"common.css\" type=\"text/css\" media=\"screen\"/> </head> <body> <div class=\"outer\"> <div class=\"inner\">", "inp f(args) ############### # Subcommands # ############### ### Produce table with sections according", "and chip, click on the corresponding number. The logfile contains the litmus test", "view the logfile for a test, click on the corresponding number. The logfile", "l = get_axiom_patterns() h = HtmlFile() all_matching = [] for name, val in", "functions that each take a log entry d = [ # Simple scopes,", "s.replace('<chip>', chip, 1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for t in short_names:", "str(self.secn) + '\">' + link +\\ '</a></li>\\n' else: assert(False) self.last_level = level def", "To view the logfile for a test, click on the corresponding number. 
The", "level classification def two_level(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args,", "</head> <body> <div class=\"outer\" style=\"width: 100%;\"> <div class=\"inner\"> <h1>Evaluating incantations</h1> <br> <center> To", "\"\"\") # HTML suffix after tables suffix = textwrap.dedent(\"\"\" </div> </div> </body> </html>", "w_str # ------------------------------------------------------------------------------ # Html file (including navigation and sections) class HtmlFile: \"\"\"Html", "LogEntry as L from generic import lty, interleave, itemify, dupchk, listify, w_str #", "in logs: # Remove directory prefix and suffix name = os.path.basename(log.fn) idx =", "file producers # ks: list of test names to include in the table", "assert(lty(short_names, str)) short_names.sort() # Table header # '&nbsp;': non-breaking space # '&#x2713;': checkmark", "p3.set_defaults(func=partial(mux, sections)) # Two-level p4 = sp.add_parser(cmds[3], parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction) f =", "args.out assert(out_base) les = log.get_all() assert(lty(les, L)) # Table header prefix = textwrap.dedent(r\"\"\"", "ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) s = produce_table(ks, logs, diro=args.diro) h = HtmlFile()", "idx = name.find('.') if idx != -1: name = name[:idx] s += '", "sep: separator # end: end of line # n: number of elements on", "<tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td> </td><td> </td>", "------------------------------------------------------------------------------ ### Fill up table line by line # l: list of items", "s += ' <th>Scope tree</th>\\n' s += ' <th>Memory map</th>\\n' s += '", "HtmlFile: \"\"\"Html file representing litmus test results\"\"\" sp = ' ' # HTML", "parents=[parent]) p4.add_argument('input', nargs='+', action=InputAction) f = cmds[3] + 
'.html' p4.add_argument('-o', '--out', action='store', default=f)", "scopes, mixed memory c(L.is_warp, L.is_mixed_mem), # Mixed scopes, global memory c(L.is_mixed_scope, L.is_global), #", "non-breaking space # '&#x2713;': checkmark prefix = textwrap.dedent(r\"\"\" <!DOCTYPE html> <html style=\"background:white;\"> <head>", "\\begin{tabular}{l g g g g r r r r g g g g", "e: L.is_rand_threads(e)] nc = 16 # Line filters lfs = [ ('uniproc', ['corr',", "incantation log) if f != incantations and f != incantations_flat and f !=", "set(ks) - all_matching ks = list(ks) if ks: h.new_section('Other', 0) ks.sort() s =", "mixed memory c(L.is_mixed_scope, L.is_mixed_mem) ] return d def get_section_names(): # Parallel the above", "if ks_s: h.new_section(name, 0) # Now divide by other sections filters = get_section_filters()", "level == self.last_level + 1: self.nav += ul + '<ul>\\n' self.nav += li", "to file f_out = out_base + '-' + suf + '.html' w_str(f_out, s)", "(instead of default name)') p9.set_defaults(func=partial(mux, incantations_flat)) # Incantations html p10 = sp.add_parser(cmds[9], description='Produce", "p1.add_argument('input', nargs='+', action=InputAction) f = cmds[0] + '.html' p1.add_argument('-o', '--out', action='store', default=f) p1.add_argument('-d',", "S & R+S & none & R & S & R+S & none", "'RWC[^+]', 'IRIW[^+]'] lc = ['CoWW', 'COWW'] ks = ma.get_matching_keys(l, logs) # Names +", "or pickle)') f = cmds[9] p10.add_argument('-o', '--out', action='store', default=f, help='output file basename (instead", "open(fn, 'w') f.write(self.s) f.close() # ------------------------------------------------------------------------------ ### Used by all HTML file producers", "= fs1[i1] f2 = fs2[i2] f3 = fs3[i3] f4 = fs4[i4] f =", "logs = [ l for l in logs if l.any_key(ks) ] s =", "= collections.OrderedDict(lfs) for sf, cfg, suf in sfs: s = prefix s =", "L.is_shared), # Mixed scopes, mixed memory c(L.is_mixed_scope, L.is_mixed_mem) ] return d def get_section_names():", "Get 
all tests that match a simple test name (like rwc) l2 =", "s) # ------------------------------------------------------------------------------ ### Produce flat incantation tables def incantations_flat(args): log = args.input", "and \\ ((L.is_cta(e) and L.does_match(e, l))) s += latex_tbl(f, logs, n) w_str(args.out, s)", "fs1[i1] f2 = fs2[i2] f3 = fs3[i3] f4 = fs4[i4] f = lambda", "incantations used for the test run. </center> <br><br> \"\"\") # HTML suffix after", "'<table>\\n' # Process header s += '<tr>\\n' s += ' <th>Scope tree</th>\\n' s", "ma.get_pos_keys(logs, ks_s) all_matching += ks_s if ks_s: h.new_section(name, 0) # Now divide by", "assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) # Get all the keys if pos: ks =", "lc)) ks2 = ma.get_filtered_keys(f, logs, ks) ks2.sort() assert(len(ks2) == n) for i, k", "1) s = s.replace('<chip>', chip, 1) l1 = list(filter(sf, les)) assert(lty(l1, L)) for", "e: L.is_rand_threads(e)] nc = 16 # Scope and mem filters, table description, filename", "in short_names: l2 = list(filter(partial(L.simple_match, s=t), l1)) assert(lty(l2, L)) if (len(l2) == 0):", "L.is_warp(e) and L.is_shared(e), 'All threads in different warps, shared memory', 's1-shared'), (lambda e:", "the above functions names = [ 'Different warps, same CTA; global memory', 'Different", "+ ((nl - len(chunk)) * sep) + end l = l[nl:] return s", "= ma.get_logs(log, lh=ma.LogInc) assert(lty(log, ma.LogInc)) assert(len(log) == 1) log = log[0] # Prefix", "memory', 'Different warps, same CTA; shared memory', 'Different warps, same CTA; mixed memory',", "' & ' l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]',", "return l # ------------------------------------------------------------------------------ ############ # Toplevel # ############ # f: function to", "w_str(f_out, s) # ------------------------------------------------------------------------------ ####################### # Command line parser # 
####################### # Open", "pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 1) s = produce_table(ks, logs,", "because there were insufficient resources on the chip to run the test. </center>", "<td>memory&nbsp;stress</td> <td> </td><td> </td><td> </td><td> </td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td>", "\\multicolumn{17}{l}{Chip: <chip>}\\\\ \\multicolumn{17}{l}{GPU Configuration: <config>}\\\\ \\hline & \\multicolumn{4}{h}{Critical Incantations:} & \\multicolumn{4}{c}{Critical Incantations:} &", "L)) short_names = log.get_names() assert(lty(short_names, str)) short_names.sort() # Table header prefix = textwrap.dedent(r\"\"\"", "nargs='+', action=InputAction) f = cmds[2] + '.html' p3.add_argument('-o', '--out', action='store', default=f) p3.add_argument('-d', '--diro',", "bail from machinery import LogEntry as L from generic import lty, interleave, itemify,", "= ma.get_pos_keys(logs, ks_s) all_matching += ks_s if ks_s: h.new_section(name, 0) # Now divide", "<td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr>", "L.does_match(e, lc)) ks1 = ma.get_filtered_keys(f, logs, ks) ks1.sort() n = len(ks1) l =", "</center> <br> <table> <tr> <td> </td> <td>1</td> <td>2</td> <td>3</td> <td>4</td> <td>5</td> <td>6</td> <td>7</td>", "os.path.splitext(chip)[0] if chip == chip_old: break chip_old = chip assert(type(chip) == str) #", "zip(filters, names): ks = ma.get_filtered_keys(f, logs, ks_s) if pos: ks = ma.get_pos_keys(logs, ks)", "ma.get_entry(k, logs) return e.short_name.lower() + sep + str(e.pos) l = list(map(mapper, ks)) header", "= (i & 0b0001) f1 = fs1[i1] f2 = fs2[i2] f3 = fs3[i3]", "elif level == self.last_level + 1: self.nav += ul + 
'<ul>\\n' self.nav +=", "l = list(map(mapper, ks)) l1, l2 = zip(*l) l = interleave(l1, l2, n)", "tables comparing the\\ effectiveness of the incantations') p9.add_argument('input', action=InputAction, help='log (text or pickle)')", "L.is_rand_threads(e)] nc = 16 # Line filters lfs = [ ('uniproc', ['corr', 'corw',", "</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr> <td>general&nbsp;bank&nbsp;conflicts</td> <td> </td><td>", "# Command line parser # ####################### # Open files and parse or unpickle", "p1.add_argument('-d', '--diro', action='store', default='entries') p1.set_defaults(func=partial(mux, flat)) # Classified p2 = sp.add_parser(cmds[1], parents=[parent]) p2.add_argument('input',", "list() for i, k in enumerate(ks1): e = ma.get_entry(k, logs) l.append(e.short_name.lower() + sep", "= ma.get_pos_keys(logs) else: ks = ma.get_keys(logs) ks = set(ks) - all_matching ks =", "assert(lty(les, L)) # Table header prefix = textwrap.dedent(r\"\"\" \\definecolor{Gray}{gray}{0.85} \\newcolumntype{g}{>{\\columncolor{Gray}}r} \\newcolumntype{h}{>{\\columncolor{Gray}}c} \\begin{tabular}{l g", "* nc) + r'\\\\' + '\\n' for t in tests: # Get all", "if item: item = itemify(item) assert(type(item) == L) entry = item.pos s +=", "= 8 l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\\+2W[^+]', 'W\\+RW\\+2W[^+]', 'S[^+]+$',", ">> 1 i4 = (i & 0b0001) f1 = fs1[i1] f2 = fs2[i2]", "self.nav = '<h4>Contents</h4>\\n' self.secn = 0 self.last_level = -1 def add_nav_item(self, link, level):", "1 def add_html(self, html): self.items.append(html) def finish(self, nav=True): self.close_nav(-1) l = [self.prefix] if", "e = ma.get_entry(k, logs) l[i] += str(e.pos) + '\\\\\\\\' s = '\\n'.join(l) w_str(args.out,", "and f != incantations_flat and f != incantations_html_flat: c = type(inp) is list", "& \\multicolumn{4}{c}{Critical 
Incantations:}\\\\ & \\multicolumn{4}{h}{none} & \\multicolumn{4}{c}{GBC} & \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra", "= open(fn, 'w') f.write(self.s) f.close() # ------------------------------------------------------------------------------ ### Used by all HTML file", "'.html' p2.add_argument('-o', '--out', action='store', default=f) p2.add_argument('-d', '--diro', action='store', default='entries') p2.set_defaults(func=partial(mux, classified)) # Sections", "included in the # table) def produce_table(ks, logs, diro='entries'): logs = [ l", "c(L.is_warp, L.is_global), c(L.is_cta, L.is_global), c(L.is_ker, L.is_global), # Simple scopes, shared memory c(L.is_warp, L.is_shared),", "<h1>GPU Litmus Test Results</h1> <br> <center> To view the logfile for a test", "name)') p10.add_argument('-d', '--diro', action='store', default='entries-inc') p10.set_defaults(func=partial(mux, incantations_html_flat)) return p if __name__ == \"__main__\":", "s = latex_tbl(f, logs, n) s += '\\n' # Produce d-warp:s-cta table, shared", "f, name in zip(filters, names): ks = ma.get_filtered_keys(f, logs) if pos: ks =", "'rwc', 'iriw', 'r']), ('thin air', ['lb']) ] lfs = collections.OrderedDict(lfs) for sf, cfg,", "= ma.get_filtered_keys(f, logs, ks) ks1.sort() n = len(ks1) l = list() for i,", "\"\"\") def __init__(self): self.items = [] self.nav = '<h4>Contents</h4>\\n' self.secn = 0 self.last_level", "<td>4</td> <td>5</td> <td>6</td> <td>7</td> <td>8</td> <td>9</td> <td>10</td> <td>11</td> <td>12</td> <td>13</td> <td>14</td> <td>15</td> <td>16</td>", "= get_cmdline_parser(cmds) if cmd not in cmds: p.print_help() sys.exit(2) print('cmd: ' + cmd)", "!= incantations and f != incantations_flat and f != incantations_html_flat: c = type(inp)", "logs, diro=args.diro) h.add_html(s) # Rest all_matching = set(all_matching) if pos: ks_s = ma.get_pos_keys(logs)", "lambda e: L.is_global(e) and \\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, 
['CoWW', 'COWW'])))", "item: item = itemify(item) assert(type(item) == L) entry = item.pos # ppi_incantations: mem_stress,", "' s = '' def mapper(k): e = ma.get_entry(k, logs) return e.short_name.lower() +", "logs, n) w_str(args.out, s) # ------------------------------------------------------------------------------ ### Produce incantations tables # All tests", "= get_section_filters() names = get_section_names() for f, name in zip(filters, names): ks =", "l = list(map(mapper, ks)) header = sep.join([\"Test\" + sep + \"Freq.\"] * n)", "in zip(filters, names): ks = ma.get_filtered_keys(f, logs, ks_s) if pos: ks = ma.get_pos_keys(logs,", "l1)) assert(lty(l2, L)) if (len(l2) == 0): continue # Name of test s", "+ sep + str(e.pos) + sep) # s1 + shared memory f =", "set(all_matching) if pos: ks_s = ma.get_pos_keys(logs) else: ks_s = ma.get_keys(logs) ks_s = set(ks_s)", "dupchk, listify, w_str # ------------------------------------------------------------------------------ # Html file (including navigation and sections) class", "= ' ' # HTML prefix before tables prefix = textwrap.dedent(\"\"\"\\ <!DOCTYPE html>", "= [ # Simple scopes, global memory c(L.is_warp, L.is_global), c(L.is_cta, L.is_global), c(L.is_ker, L.is_global),", "<td>thread&nbsp;synchronisation</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td>&#x2713;</td><td>&#x2713;</td>", "Subcommands # ############### ### Produce table with sections according to axioms def classified(args):", "s += ' <th>Memory map</th>\\n' s += ' <th>Name</th>\\n' for log in logs:", "produce_table(ks, logs, diro='entries'): logs = [ l for l in logs if l.any_key(ks)", "& \\multicolumn{4}{h}{MS} & \\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra Incantations:}", "'iriw', 'r']), ('thin air', ['lb']) 
] lfs = collections.OrderedDict(lfs) for sf, cfg, suf", "'.tex' p5.add_argument('-o', '--out', action='store', default=f) p5.set_defaults(func=partial(mux, latex)) # Latex 2 p6 = sp.add_parser(cmds[5],", "ks_s: h.new_section(name, 0) # Now divide by other sections filters = get_section_filters() names", "args.pos logs = args.input assert(type(logs) == ma.Log) n = 8 l = ['CO',", "option_string=None): setattr(namespace, self.dest, values) def get_cmdline_parser(cmds): # Parent of all p = argparse.ArgumentParser()", "contains the litmus test code. When a dash appears instead of a result,", "+= ' <th>Name</th>\\n' for log in logs: # Remove directory prefix and suffix", "s = header + fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s", "we do not want to read an incantation log) if f != incantations", "\\multicolumn{4}{c}{GBC+MS}\\\\ & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:} & \\multicolumn{4}{h}{Extra Incantations:} & \\multicolumn{4}{c}{Extra Incantations:}\\\\", "mem_stress, general_bc, barrier, rand_threads s += ' & ' + str(entry) s +=", "== chip_old: break chip_old = chip assert(type(chip) == str) log = ma.get_logs(log, lh=ma.LogInc)", "logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l = get_axiom_patterns() h = HtmlFile()", "3 p7 = sp.add_parser(cmds[6], parents=[parent]) p7.add_argument('input', action=InputAction) f = cmds[6] + '.tex' p7.add_argument('-o',", "for name, val in l: ks_s = ma.get_matching_keys(val, logs) if pos: ks_s =", "def finish(self, nav=True): self.close_nav(-1) l = [self.prefix] if nav: l += [self.nav] l", "def new_section(self, heading, level): assert(0 <= level <= 2) l = str(level+2) s", "to the function def mux(f, args): inp = args.input l = list(listify(inp)) if", "= sep.join(chunk) s += line + ((nl - len(chunk)) * sep) + end", "['corr', 'corw', 'cowr', 'coww']), ('observation', ['mp', 'isa2', 'wrc']), ('prop light', ['2+2w', 'w+rw+2w', 's']),", "pos: 
ks_s = ma.get_pos_keys(logs) else: ks_s = ma.get_keys(logs) ks_s = set(ks_s) - all_matching", "a test, click on the corresponding number. The logfile also contains the litmus", "argparse.ArgumentParser(add_help=False) parent.add_argument('-p', '--pos', action='store_true') # Subparsers sp = p.add_subparsers(help='use <subcommand> -h for further", "= ma.get_entry(k, logs) l[i] += str(e.pos) + '\\\\\\\\' s = '\\n'.join(l) w_str(args.out, s)", "'\">' + link +\\ '</a></li>\\n' elif level == self.last_level + 1: self.nav +=", "'\\\\\\\\\\n' s += '\\\\hline\\n' s += '\\\\end{tabular}\\n' # Write table to file f_out", "</td><td> </td><td> </td><td> </td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr>", "= ' & ' s = '\\midrule\\n' def mapper(k): e = ma.get_entry(k, logs)", "l[:nl] line = sep.join(chunk) s += line + ((nl - len(chunk)) * sep)", "e = ma.get_entry(k, logs) l.append(e.short_name.lower() + sep + str(e.pos) + sep) # s1", "l[i] += str(e.pos) + '\\\\\\\\' s = '\\n'.join(l) w_str(args.out, s) ### Produce latex", "or because there were insufficient resources on the chip to run the test.", "litmus log e.store_log_dir(diro) else: s += '<td><a href=\"\">---</a></td>\\n' s += '</tr>\\n' s +=", "s = s.replace('<config>', cfg, 1) s = s.replace('<chip>', chip, 1) l1 = list(filter(sf,", ">> 2 i3 = (i & 0b0010) >> 1 i4 = (i &", "= ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 0) s = produce_table(ks, logs, diro=args.diro) h.add_html(s)", "fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)] fs2 = [lambda e:", "'diro')) chip = os.path.basename(log) assert(type(chip) == str) chip_old = chip while True: chip", "L.does_match(e, lc)) ks3 = ma.get_filtered_keys(f, logs, ks) ks3.sort() assert(len(ks3) == n) for i,", "<td>16</td> </tr> <tr> <td>memory&nbsp;stress</td> <td> </td><td> </td><td> </td><td> </td> <td> 
</td><td> </td><td> </td><td>", "& 0b1000) >> 3 i2 = (i & 0b0100) >> 2 i3 =", "in this file # are ignored; non-existing tests and non-existing entries (e.g. for", "test results\"\"\" sp = ' ' # HTML prefix before tables prefix =", "Write table to file f_out = out_base + '-' + suf + '.html'", "according to scopes and memory regions def sections(args): pos = args.pos logs =", "== 1) log = log[0] # Prefix of output filename, default is the", "<td style=\"text-align:left\"> <config> </td> </tr> </table> </center> <br> <table> <tr> <td> </td> <td>1</td>", "e: L.is_warp(e) and L.is_global(e), 'All threads in different warps, global memory', 's1-global'), (lambda", "by other sections filters = get_section_filters() names = get_section_names() for f, name in", "</td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> <td> </td><td> </td><td> </td><td> </td> <td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td><td>&#x2713;</td> </tr> <tr>", "s) def latex2(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log) sep", "p2.add_argument('input', nargs='+', action=InputAction) f = cmds[1] + '.html' p2.add_argument('-o', '--out', action='store', default=f) p2.add_argument('-d',", "s += item.pp_cell_link_dir(2, args.diro) # Produce file containing raw litmus log item.store_log_dir(args.diro) else:", "n) + \"\\\\\\\\\\n\" header += '\\midrule\\n' s = header + fill_up(l, sep, '\\\\\\\\\\n',", "p6.add_argument('-o', '--out', action='store', default=f) p6.set_defaults(func=partial(mux, latex2)) # Latex 3 p7 = sp.add_parser(cmds[6], parents=[parent])", "s += latex_tbl(f, logs, n) w_str(args.out, s) def latex2(args): pos = args.pos logs", "n) s = fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s ###", "logs: list of log objects (only logs which have the key are included", "= set(ks_s) - all_matching ks_s = list(ks_s) if ks_s: h.new_section('Other', 0) ks_s.sort() filters", "((nl - len(chunk)) * sep) + end l = 
l[nl:] return s def", "fill_up(l, sep, '\\\\\\\\\\n', n) s += '\\\\bottomrule\\n' return s def latex_tbl2(f, logs, n):", "filtering according to # names def get_section_filters(): def c(f, g): return lambda e:", "and filename suffix sfs = [ (lambda e: L.is_warp(e) and L.is_global(e), 'All threads", "w_str(args.out, s) def latex2(args): pos = args.pos logs = args.input assert(type(logs) == ma.Log)", "= self.sp li = sp * (level + 1) ul = sp *", "s = latex_tbl2(f, logs, n) s += '\\n' # Produce d-warp:s-cta table, shared", "def classified(args): pos = args.pos logs = args.input assert(lty(logs, ma.Log)) assert(hasattr(args, 'diro')) l", "all HTML file producers # ks: list of test names to include in", "row s += '<tr>\\n' le = ma.get_entry(k, logs) s += le.pp_prefix(2) for log", "assert(type(log) == str) # Get chip name chip = os.path.basename(log) assert(type(chip) == str)", "'--out', action='store', default=f) p2.add_argument('-d', '--diro', action='store', default='entries') p2.set_defaults(func=partial(mux, classified)) # Sections p3 =", "s = produce_table(ks, logs, diro=args.diro) h.add_html(s) all_matching = set(all_matching) if pos: ks =", "of test s += t for i in range(0, nc): i1 = (i", "(L.is_warp(e) or L.does_match(e, lc)) ks2 = ma.get_filtered_keys(f, logs, ks) ks2.sort() assert(len(ks2) == n)", "\\ ((L.is_warp(e) and L.does_match(e, l)) or (L.does_match(e, ['CoWW', 'COWW']))) s += latex_tbl2(f, logs,", "Scope and mem filters, including table description and filename suffix sfs = [", "if pos: ks = ma.get_pos_keys(logs, ks) if ks: h.new_section(name, 1) s = produce_table(ks,", "'s1-global'), (lambda e: L.is_warp(e) and L.is_shared(e), 'All threads in different warps, shared memory',", "incantations_html_flat: c = type(inp) is list if not c: inp = [inp] inp", "### Used by all HTML file producers # ks: list of test names", "+= ' & ' + str(entry) s += '\\\\\\\\\\n' s += '\\\\hline\\n' s", "ks = ma.get_keys(logs) s = produce_table(ks, logs, diro=args.diro) h = 
HtmlFile() h.add_html(s) h.finish(nav=False)", "read an incantation log) if f != incantations and f != incantations_flat and", "unpickle class InputAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) def", "collections import textwrap from functools import partial import machinery as ma from machinery", "<!DOCTYPE html> <html> <head> <meta charset=\"UTF-8\"> <title>GPU Litmus Test Results</title> <link rel=\"stylesheet\" href=\"common.css\"", "# Produce d-warp:s-cta table, global memory f = lambda e: L.is_global(e) and \\", "# logs: list of log objects (only logs which have the key are", "'w') f.write(self.s) f.close() # ------------------------------------------------------------------------------ ### Used by all HTML file producers #", "memory' ] return names # Get key patterns per axiom def get_axiom_patterns(): l" ]
[ "result arr1 = [1, 2, 2, 2, 3 ] arr2 = [2] print(arr_diff(arr1,", "return arr1 result = list() for i in range(0, len(arr1)): flag = False", "= list() for i in range(0, len(arr1)): flag = False for j in", "arr2[j]: flag = True if not flag: result.append(arr1[i]) return result arr1 = [1,", "= True if not flag: result.append(arr1[i]) return result arr1 = [1, 2, 2,", "in range(0, len(arr1)): flag = False for j in range(0, len(arr2)): if arr1[i]", "arr1 result = list() for i in range(0, len(arr1)): flag = False for", "len(arr2) == 0: return arr1 result = list() for i in range(0, len(arr1)):", "j in range(0, len(arr2)): if arr1[i] == arr2[j]: flag = True if not", "flag = False for j in range(0, len(arr2)): if arr1[i] == arr2[j]: flag", "for j in range(0, len(arr2)): if arr1[i] == arr2[j]: flag = True if", "def arr_diff(arr1, arr2): if len(arr2) == 0: return arr1 result = list() for", "result.append(arr1[i]) return result arr1 = [1, 2, 2, 2, 3 ] arr2 =", "range(0, len(arr2)): if arr1[i] == arr2[j]: flag = True if not flag: result.append(arr1[i])", "result = list() for i in range(0, len(arr1)): flag = False for j", "arr1 = [1, 2, 2, 2, 3 ] arr2 = [2] print(arr_diff(arr1, arr2))", "range(0, len(arr1)): flag = False for j in range(0, len(arr2)): if arr1[i] ==", "arr2): if len(arr2) == 0: return arr1 result = list() for i in", "flag = True if not flag: result.append(arr1[i]) return result arr1 = [1, 2,", "return result arr1 = [1, 2, 2, 2, 3 ] arr2 = [2]", "arr_diff(arr1, arr2): if len(arr2) == 0: return arr1 result = list() for i", "if len(arr2) == 0: return arr1 result = list() for i in range(0,", "i in range(0, len(arr1)): flag = False for j in range(0, len(arr2)): if", "for i in range(0, len(arr1)): flag = False for j in range(0, len(arr2)):", "0: return arr1 result = list() for i in range(0, len(arr1)): flag =", "len(arr1)): flag = False for j in range(0, len(arr2)): if arr1[i] == arr2[j]:", "flag: result.append(arr1[i]) return result arr1 = [1, 2, 2, 2, 3 ] arr2", 
"False for j in range(0, len(arr2)): if arr1[i] == arr2[j]: flag = True", "== 0: return arr1 result = list() for i in range(0, len(arr1)): flag", "in range(0, len(arr2)): if arr1[i] == arr2[j]: flag = True if not flag:", "if not flag: result.append(arr1[i]) return result arr1 = [1, 2, 2, 2, 3", "not flag: result.append(arr1[i]) return result arr1 = [1, 2, 2, 2, 3 ]", "if arr1[i] == arr2[j]: flag = True if not flag: result.append(arr1[i]) return result", "True if not flag: result.append(arr1[i]) return result arr1 = [1, 2, 2, 2,", "= False for j in range(0, len(arr2)): if arr1[i] == arr2[j]: flag =", "<gh_stars>1-10 def arr_diff(arr1, arr2): if len(arr2) == 0: return arr1 result = list()", "list() for i in range(0, len(arr1)): flag = False for j in range(0,", "== arr2[j]: flag = True if not flag: result.append(arr1[i]) return result arr1 =", "arr1[i] == arr2[j]: flag = True if not flag: result.append(arr1[i]) return result arr1", "len(arr2)): if arr1[i] == arr2[j]: flag = True if not flag: result.append(arr1[i]) return" ]
[ "p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje de los hombres es: \"+str(p_hombres)+\" %\") print(\"El porcentaje", "total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje de los hombres es: \"+str(p_hombres)+\" %\")", "de hombres \")) n_mujeres=int(input(\"Ingrese el número de mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100)", "el número de hombres \")) n_mujeres=int(input(\"Ingrese el número de mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total)", "porcentaje de los hombres es: \"+str(p_hombres)+\" %\") print(\"El porcentaje de las mujeres es:", "Numero_hombres-->int-->n_hombres Numero_mujeres-->int-->n_mujeres Salidas porcentaje_hombres-->float-->p_hombres porcentaje_mujeres-->float-->p_mujeres \"\"\" n_hombres=int(input(\"Ingrese el número de hombres \")) n_mujeres=int(input(\"Ingrese", "de mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje de los hombres", "n_mujeres=int(input(\"Ingrese el número de mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje", "Entradas Numero_hombres-->int-->n_hombres Numero_mujeres-->int-->n_mujeres Salidas porcentaje_hombres-->float-->p_hombres porcentaje_mujeres-->float-->p_mujeres \"\"\" n_hombres=int(input(\"Ingrese el número de hombres \"))", "porcentaje_hombres-->float-->p_hombres porcentaje_mujeres-->float-->p_mujeres \"\"\" n_hombres=int(input(\"Ingrese el número de hombres \")) n_mujeres=int(input(\"Ingrese el número de", "los hombres es: \"+str(p_hombres)+\" %\") print(\"El porcentaje de las mujeres es: \"+str(p_mujeres)+\" %\")", 
"p_mujeres=float(p_m*100) print(\"El porcentaje de los hombres es: \"+str(p_hombres)+\" %\") print(\"El porcentaje de las", "p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje de los hombres es: \"+str(p_hombres)+\" %\") print(\"El", "n_hombres=int(input(\"Ingrese el número de hombres \")) n_mujeres=int(input(\"Ingrese el número de mujeres \")) total=int(n_hombres+n_mujeres)", "Numero_mujeres-->int-->n_mujeres Salidas porcentaje_hombres-->float-->p_hombres porcentaje_mujeres-->float-->p_mujeres \"\"\" n_hombres=int(input(\"Ingrese el número de hombres \")) n_mujeres=int(input(\"Ingrese el", "Salidas porcentaje_hombres-->float-->p_hombres porcentaje_mujeres-->float-->p_mujeres \"\"\" n_hombres=int(input(\"Ingrese el número de hombres \")) n_mujeres=int(input(\"Ingrese el número", "de los hombres es: \"+str(p_hombres)+\" %\") print(\"El porcentaje de las mujeres es: \"+str(p_mujeres)+\"", "mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje de los hombres es:", "hombres \")) n_mujeres=int(input(\"Ingrese el número de mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100)", "print(\"El porcentaje de los hombres es: \"+str(p_hombres)+\" %\") print(\"El porcentaje de las mujeres", "\"\"\" Entradas Numero_hombres-->int-->n_hombres Numero_mujeres-->int-->n_mujeres Salidas porcentaje_hombres-->float-->p_hombres porcentaje_mujeres-->float-->p_mujeres \"\"\" n_hombres=int(input(\"Ingrese el número de hombres", "el número de mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje de", "\"\"\" n_hombres=int(input(\"Ingrese el número de hombres \")) n_mujeres=int(input(\"Ingrese el 
número de mujeres \"))", "porcentaje_mujeres-->float-->p_mujeres \"\"\" n_hombres=int(input(\"Ingrese el número de hombres \")) n_mujeres=int(input(\"Ingrese el número de mujeres", "p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje de los hombres es: \"+str(p_hombres)+\" %\") print(\"El porcentaje de", "número de mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje de los", "\")) n_mujeres=int(input(\"Ingrese el número de mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El", "número de hombres \")) n_mujeres=int(input(\"Ingrese el número de mujeres \")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total)", "\")) total=int(n_hombres+n_mujeres) p_h=float(n_hombres/total) p_m=float(n_mujeres/total) p_hombres=float(p_h*100) p_mujeres=float(p_m*100) print(\"El porcentaje de los hombres es: \"+str(p_hombres)+\"" ]
[]
[ "config.gpu_options.allow_growth = True sess = tf.Session(config=config) nact = ac_space.n nbatch = nenvs*nsteps ADV", "rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions,", "u1, u2, values, summary=False) nseconds = time.time() - tstart fps = int((update *", "import random def gs(x): return x.get_shape().as_list() class Model(object): def __init__(self, policy, ob_space, ac_space,", "def update_obs(self, obs): # Do frame-stacking here instead of the FrameStack wrapper to", "nbatch) / nseconds) ev = explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev))", "in v.name] pg_grads = tf.gradients(pg_loss, policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss = pg_loss", "self.obs[n] = self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of steps to batch of rollouts", "for e_grad, p_grad, param in zip(entropy_grads, pg_grads, policy_params): grad = -e_grad * ent_coef", "baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.atari_wrappers import wrap_deepmind from baselines.a2c.utils import discount_with_dones, jacobian from", "import numpy as np import tensorflow as tf from baselines import logger from", "in enumerate(dones): if done: self.obs[n] = self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of steps", "rewards mb_rewards = mb_rewards.flatten() mb_actions = mb_actions.flatten() mb_values = mb_values.flatten() mb_masks = mb_masks.flatten()", "_obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards = self.rewards[-100:] # make numpy", "mb_masks.flatten() return 
mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values class RolloutRunner(Runner): def __init__(self, env,", "nseconds) ev = explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv)", "return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_u1, mb_u2, END def learn(policy, env,", "value_loss, policy_entropy, lv, _ = sess.run( [self.sum_op, pg_loss, vf_loss, entropy, mlgv, _train], td_map", "total_timesteps=int(80e6), lrschedule='linear', logdir=None): config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True sess =", "train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss, policy_params) sm = tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A * (1.", "policy_params, grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p, [-1]) for p in pg_grads], 0) cv_grad_splits =", "of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions =", "0 self._num_steps = 0 self.rewards = [] def run(self): # reset env self.obs", "= np.asarray(_rewards) mb_actions = np.asarray(_actions) mb_values = np.asarray(_values) mb_masks = np.asarray(_masks) mb_u1 =", "of the FrameStack wrapper to reduce # IPC overhead self.obs = np.roll(self.obs, shift=-self.n_in,", "dtype=np.float32).swapaxes(1, 0) mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) # discount/bootstrap off value fn _obs,", "== 1: obs, states, rewards, masks, actions, values, u1, u2, END = runner.run()", "import wrap_deepmind from baselines.a2c.utils import discount_with_dones, jacobian from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables", "import logging import numpy as np import tensorflow 
as tf from baselines import", "= np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)", "* nbatch) / nseconds) ev = explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\",", "_train], td_map ) self._step += 1 return policy_loss, value_loss, policy_entropy, lv def save(save_path):", "in zip(pg_grads, ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p, [-1]) for", "for i in range(self.nenv)] mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_u1, mb_u2 = [],", "else: rewards = discount_with_dones(rewards, dones, self.gamma) mb_rewards[n] = rewards mb_rewards = mb_rewards.flatten() mb_actions", "= np.asarray(_masks) mb_u1 = np.asarray(_u1) mb_u2 = np.asarray(_u2) self._num_rollouts += 1 self._num_steps +=", "#print(\"Episode {}, Ave R {}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\",", "(rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones = dones.tolist()", "= episode_over[n]+1 # crop out only played experience obs = obs[:episode_length] rewards =", "tf.summary.histogram(v.name+\"_grad\", g) self.sum_op = tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with", "mb_actions = np.asarray(_actions) mb_values = np.asarray(_values) mb_masks = np.asarray(_masks) mb_u1 = np.asarray(_u1) mb_u2", "= self.model.value(self.obs, self.states, self.dones).tolist() #discount/bootstrap off value fn for n, 
(rewards, dones, value)", "self.obs = np.zeros(self.obs.shape) obs = self.env.reset() self.update_obs(obs) # run env until all threads", "values) nseconds = time.time() - tstart fps = int((update * nbatch) / nseconds)", "this episode episode_length = episode_over[n]+1 # crop out only played experience obs =", "ob_space, ac_space, nenvs, nsteps, nstack, num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6),", "in params if \"pi\" in v.name] vf_params = [v for v in params", "= tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)", "gamma=gamma) nbatch = nenvs*nsteps tstart = time.time() for update in range(1, total_timesteps//nbatch+1): if", "dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards = self.rewards[-100:] #", "learn(policy, env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100,", "all([e >= 0 for e in episode_over]): actions, u1, u2, values, states =", "nsteps self.states = model.initial_state self.dones = [False for _ in range(nenv)] def update_obs(self,", "g in pg_grads], 0) all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean =", "= step_model.value self.initial_state = step_model.initial_state self.save = save self.load = load tf.global_variables_initializer().run(session=sess) class", "= nenv self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack) self.obs = np.zeros((nenv, self.n_in*nstack)) obs = env.reset()", "[], [] for n, (obs, rewards, actions, values, dones, masks, u1, u2) in", "[] def run(self): # reset env self.obs = 
np.zeros(self.obs.shape) obs = self.env.reset() self.update_obs(obs)", "= ema.average(all_policy_grads_sq) em_var = em_mean_sq - tf.square(em_mean) em_log_var = tf.log(em_var + 1e-20) mlgv", "td_map ) self._step += 1 return policy_loss, value_loss, policy_entropy, lv def save(save_path): ps", "model self.n_in, = env.observation_space.shape nenv = env.num_envs self.nenv = nenv self.batch_ob_shape = (nenv*nsteps,", "the FrameStack wrapper to reduce # IPC overhead self.obs = np.roll(self.obs, shift=-self.n_in, axis=1)", "rewards.tolist() dones = dones.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]", "= tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads) lr =", "summary=False): advs = rewards - values for step in range(len(obs)): cur_lr = lr.value()", "policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [self.sum_op, pg_loss, vf_loss, entropy, mlgv, _train],", "inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True sess = tf.Session(config=config) nact = ac_space.n nbatch = nenvs*nsteps", "trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) self._step = 0 def train(obs, states, rewards,", "mb_masks = mb_masks.flatten() return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values class RolloutRunner(Runner): def", "value_loss, policy_entropy, lv, _ = sess.run( [pg_loss, vf_loss, entropy, mlgv, _train], td_map )", "self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of steps to batch of rollouts mb_obs =", "mb_dones.append(self.dones) # batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0)", "summary=False) nseconds = time.time() - tstart fps = int((update * nbatch) / nseconds)", "self.states for n in range(self.nsteps): actions, 
values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs))", "rewards = discount_with_dones(rewards, dones, self.gamma) mb_rewards[n] = rewards mb_rewards = mb_rewards.flatten() mb_actions =", "os.path as osp import gym import time import joblib import logging import numpy", "played experience obs = obs[:episode_length] rewards = rewards[:episode_length] actions = actions[:episode_length] values =", "last_values)): rewards = rewards.tolist() dones = dones.tolist() if dones[-1] == 0: rewards =", "tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) self._step = 0 def", "in v.name] entropy_grads = tf.gradients(entropy, policy_params) ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads =", "FRAME STACK ave_r = np.mean(self.rewards) #print(\"Episode {}, Ave R {}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\", ave_r)", "END: break policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2,", "policy_loss, value_loss, policy_entropy, lv def save(save_path): ps = sess.run(params) make_path(save_path) joblib.dump(ps, save_path) def", "= u2[:episode_length] assert dones[-1] == True masks = masks[:episode_length] # discount the rewards", "[], [], [], [], [], [], [] for n, (obs, rewards, actions, values,", "joblib.load(load_path) restores = [] for p, loaded_p in zip(params, loaded_params): restores.append(p.assign(loaded_p)) ps =", "class Runner(object): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): self.env = env self.model", "= dones.tolist() masks = masks.tolist() u1, u2 = u1.tolist(), u2.tolist() # get length", "np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones", "restores = [] for p, loaded_p in zip(params, loaded_params): 
restores.append(p.assign(loaded_p)) ps = sess.run(restores)", "1:] last_values = self.model.value(self.obs, self.states, self.dones).tolist() #discount/bootstrap off value fn for n, (rewards,", "tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads) lr = Scheduler(v=lr,", "not all([e >= 0 for e in episode_over]): actions, u1, u2, values, states", "states self.dones = dones for n, done in enumerate(dones): if done: self.obs[n] =", "rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.gamma) mb_rewards[n] =", "runner.run() if END: break policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks,", "= 0 def train(obs, states, rewards, masks, u1, u2, values, summary=False): advs =", "length of this episode episode_length = episode_over[n]+1 # crop out only played experience", "= discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.gamma) mb_rewards[n] = rewards", "#print(self._num_steps, len(rewards)) #if self._num_steps > 5000000: if np.mean(self.rewards) >= 195.:#195.: #if self._num_rollouts >", "logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular() else: obs, states, rewards, masks, actions,", "dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones =", "tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32, []) step_model = policy(sess,", "masks, u1, u2, values) nseconds = time.time() - tstart fps = int((update *", "= np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, 
dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1]", "oh_A * (1. - sm) + (1. - oh_A) * (-sm) pi_grads =", "= tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p, [-1]) for p in pg_grads], 0)", "values, summary=False): advs = rewards - values for step in range(len(obs)): cur_lr =", "save self.load = load tf.global_variables_initializer().run(session=sess) class Runner(object): def __init__(self, env, model, nsteps=5, nstack=4,", "while not all([e >= 0 for e in episode_over]): actions, u1, u2, values,", "\"vf\" in v.name] entropy_grads = tf.gradients(entropy, policy_params) ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads", "frame-stacking here instead of the FrameStack wrapper to reduce # IPC overhead self.obs", "mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) obs, rewards, dones, _ = self.env.step(actions) self.states = states self.dones", "g, v in grads: print(v.name, g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op = tf.summary.merge_all()", "(-sm) pi_grads = -((tf.expand_dims(R, 1) - train_model.vf_t) * dlogp_dpi) pg_grads = tf.gradients(train_model.pi, policy_params,", "= np.mean(self.rewards) #print(\"Episode {}, Ave R {}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\",", "schedule=lrschedule) self._step = 0 def train(obs, states, rewards, masks, u1, u2, values, summary=False):", "= ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean = ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq) em_var = em_mean_sq -", "train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr } if states != []: td_map[train_model.S] = states td_map[train_model.M]", "masks, u1, u2, values, summary=False): advs = rewards - values for step in", "discount/bootstrap off value fn _obs, _rewards, 
_actions, _values, _masks, _u1, _u2 = [],", "tf.log(em_var + 1e-20) mlgv = tf.reduce_mean(em_log_var) for g, v in grads: print(v.name, g)", "np.asarray(_rewards) mb_actions = np.asarray(_actions) mb_values = np.asarray(_values) mb_masks = np.asarray(_masks) mb_u1 = np.asarray(_u1)", "logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards) * 4) #logger.dump_tabular() END = False #print(self._num_steps,", "import cat_entropy, mse import random def gs(x): return x.get_shape().as_list() class Model(object): def __init__(self,", "= sess.run(restores) self.train = train self.train_model = train_model self.step_model = step_model self.step =", "sess.run( [pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self._step += 1 return policy_loss,", "R:rewards, LR:cur_lr } if states != []: td_map[train_model.S] = states td_map[train_model.M] = masks", "log_interval == 0 or update == 1: obs, states, rewards, masks, actions, values,", "[], [], [] mb_states = self.states step = 0 while not all([e >=", "dones, masks, u1, u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_masks, mb_u1, mb_u2)):", "= mb_values.flatten() mb_masks = mb_masks.flatten() return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values class", "param in zip(entropy_grads, pg_grads, policy_params): grad = -e_grad * ent_coef + p_grad policy_grads.append(grad)", "self.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.gamma) mb_rewards[n] = rewards mb_rewards = mb_rewards.flatten()", "policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values) nseconds = time.time()", "g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op = tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir) trainer =", "= mb_dones[:, 1:] mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2 = np.asarray(mb_u2, 
dtype=np.float32).swapaxes(1, 0)", "mlgv, _train], td_map ) self._step += 1 return policy_loss, value_loss, policy_entropy, lv def", "dones, self.gamma) mb_rewards[n] = rewards mb_rewards = mb_rewards.flatten() mb_actions = mb_actions.flatten() mb_values =", "params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R) if train_model.relaxed: pg_loss = tf.constant(0.0) oh_A", "td_map ) self.writer.add_summary(sum_str, self._step) else: policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [pg_loss,", "(1. - sm) + (1. - oh_A) * (-sm) pi_grads = -((tf.expand_dims(R, 1)", "0: rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.gamma) mb_rewards[n]", "= explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular() else:", "= u1[:episode_length] u2 = u2[:episode_length] assert dones[-1] == True masks = masks[:episode_length] #", "states, rewards, masks, u1, u2, values) nseconds = time.time() - tstart fps =", "= args.vf_coef nenvs = env.num_envs ob_space = env.observation_space ac_space = env.action_space num_procs =", "= np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:, -self.n_in:] = obs[:, :self.n_in] def run(self): mb_obs, mb_rewards,", "of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards,", "set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.atari_wrappers import wrap_deepmind from baselines.a2c.utils import", "self.obs[:, -self.n_in:] = obs[:, :self.n_in] def run(self): mb_obs, mb_rewards, mb_actions, mb_values, mb_dones =", "0) mb_actions = 
np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones =", "self._step += 1 return policy_loss, value_loss, policy_entropy, lv def save(save_path): ps = sess.run(params)", "rewards.tolist() self.rewards.append(sum(rewards)) actions = actions.tolist() values = values.tolist() dones = dones.tolist() masks =", "td_map = { train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr } if states !=", "dtype=np.int32).swapaxes(1, 0) mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks", "cat_entropy, mse import random def gs(x): return x.get_shape().as_list() class Model(object): def __init__(self, policy,", "0) mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks =", "e_grad, p_grad, param in zip(entropy_grads, pg_grads, policy_params): grad = -e_grad * ent_coef +", "1 self._num_steps += len(rewards) * 4 # FRAME STACK ave_r = np.mean(self.rewards) #print(\"Episode", "nsteps=5, nstack=4, gamma=0.99): super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts = 0 self._num_steps =", "in range(len(obs)): cur_lr = lr.value() td_map = { train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards,", "u2, values, summary=False): advs = rewards - values for step in range(len(obs)): cur_lr", "policy_params = [v for v in params if \"pi\" in v.name] vf_params =", "tstart = time.time() for update in range(1, total_timesteps//nbatch+1): if True: #update % log_interval", "masks[:episode_length] # discount the rewards rewards = discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions)", "load tf.global_variables_initializer().run(session=sess) class Runner(object): def __init__(self, env, model, nsteps=5, 
nstack=4, gamma=0.99): self.env =", "grads: print(v.name, g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op = tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir)", "ac_space.n nbatch = nenvs*nsteps ADV = tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32, [None]) LR", "self.states step = 0 while not all([e >= 0 for e in episode_over]):", "mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) # discount/bootstrap off value fn _obs, _rewards, _actions,", "def load(load_path): loaded_params = joblib.load(load_path) restores = [] for p, loaded_p in zip(params,", "oh_A = tf.one_hot(train_model.a0, ac_space.n) params = find_trainable_variables(\"model\") policy_params = [v for v in", "[v for v in params if \"vf\" in v.name] entropy_grads = tf.gradients(entropy, policy_params)", "_u2.extend(u2) self.rewards = self.rewards[-100:] # make numpy mb_obs = np.asarray(_obs) mb_rewards = np.asarray(_rewards)", "mb_values, mb_u1, mb_u2, END def learn(policy, env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5,", "env.reset() self.update_obs(obs) self.gamma = gamma self.nsteps = nsteps self.states = model.initial_state self.dones =", "entropy, mlgv, _train], td_map ) self._step += 1 return policy_loss, value_loss, policy_entropy, lv", "= mb_dones[:, 1:] last_values = self.model.value(self.obs, self.states, self.dones).tolist() #discount/bootstrap off value fn for", "make numpy mb_obs = np.asarray(_obs) mb_rewards = np.asarray(_rewards) mb_actions = np.asarray(_actions) mb_values =", "tf.gradients(entropy, policy_params) ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss, policy_params) sm =", "pg, dg in zip(pg_grads, ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p,", "until all threads finish episode_over = [-1 for i in range(self.nenv)] mb_obs, 
mb_rewards,", "td_map[train_model.S] = states td_map[train_model.M] = masks if summary: sum_str, policy_loss, value_loss, policy_entropy, lv,", "[] for p, loaded_p in zip(params, loaded_params): restores.append(p.assign(loaded_p)) ps = sess.run(restores) self.train =", "= g grads = [grad_dict[v] for v in params] print(grads) else: pg_loss =", "env.num_envs ob_space = env.observation_space ac_space = env.action_space num_procs = len(env.remotes) # HACK model", "reduce # IPC overhead self.obs = np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:, -self.n_in:] = obs[:,", "in range(1, total_timesteps//nbatch+1): if True: #update % log_interval == 0 or update ==", "total_timesteps//nbatch+1): if True: #update % log_interval == 0 or update == 1: obs,", "em_mean_sq - tf.square(em_mean) em_log_var = tf.log(em_var + 1e-20) mlgv = tf.reduce_mean(em_log_var) for g,", "vf_coef cv_grads = tf.gradients(vf_loss, vf_params) policy_grads = [] for e_grad, p_grad, param in", "batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions", "masks = masks.tolist() u1, u2 = u1.tolist(), u2.tolist() # get length of this", "def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts", "False #print(self._num_steps, len(rewards)) #if self._num_steps > 5000000: if np.mean(self.rewards) >= 195.:#195.: #if self._num_rollouts", "[]: td_map[train_model.S] = states td_map[train_model.M] = masks if summary: sum_str, policy_loss, value_loss, policy_entropy,", "step_model.initial_state self.save = save self.load = load tf.global_variables_initializer().run(session=sess) class Runner(object): def __init__(self, env,", "config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True sess = 
tf.Session(config=config) nact =", "env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, logdir=None,", "0 if episode_over[n] == -1: episode_over[n] = step self.update_obs(obs) mb_rewards.append(rewards) step += 1", "u1, u2, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1)", "rewards, dones, _ = self.env.step(actions) self.states = states self.dones = dones for n,", "time.time() for update in range(1, total_timesteps//nbatch+1): if True: #update % log_interval == 0", "self.states = states self.dones = dones for n, done in enumerate(dones): if done:", "= tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R) if train_model.relaxed: pg_loss =", "# FRAME STACK ave_r = np.mean(self.rewards) #print(\"Episode {}, Ave R {}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\",", "= find_trainable_variables(\"model\") policy_params = [v for v in params if \"pi\" in v.name]", "+ p_grad policy_grads.append(grad) grad_dict = {} for g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)):", "= [v for v in params if \"pi\" in v.name] pg_grads = tf.gradients(pg_loss,", "nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, logdir=None, bootstrap=False, args=None): tf.reset_default_graph()", "gamma=0.99): super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts = 0 self._num_steps = 0 self.rewards", "= tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A * (1. - sm) + (1. 
- oh_A)", "u1.tolist(), u2.tolist() # get length of this episode episode_length = episode_over[n]+1 # crop", "= tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R)", "labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R) if train_model.relaxed:", "rewards = rewards.tolist() dones = dones.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards+[value],", "from baselines.common.atari_wrappers import wrap_deepmind from baselines.a2c.utils import discount_with_dones, jacobian from baselines.a2c.utils import Scheduler,", "1e-20) mlgv = tf.reduce_mean(em_log_var) for g, v in grads: print(v.name, g) tf.summary.histogram(v.name, v)", "for _ in range(nenv)] def update_obs(self, obs): # Do frame-stacking here instead of", "Ave R {}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards) *", "sess.run(params) make_path(save_path) joblib.dump(ps, save_path) def load(load_path): loaded_params = joblib.load(load_path) restores = [] for", "ac_space, nenvs, nsteps, nstack, reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params", "nenvs, nsteps, nstack, reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params =", "values, summary=False) nseconds = time.time() - tstart fps = int((update * nbatch) /", "u2.tolist() 
# get length of this episode episode_length = episode_over[n]+1 # crop out", "find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R) if train_model.relaxed: pg_loss = tf.constant(0.0) oh_A = tf.one_hot(train_model.a0,", "self.rewards = [] def run(self): # reset env self.obs = np.zeros(self.obs.shape) obs =", "loaded_params = joblib.load(load_path) restores = [] for p, loaded_p in zip(params, loaded_params): restores.append(p.assign(loaded_p))", "all threads finish episode_over = [-1 for i in range(self.nenv)] mb_obs, mb_rewards, mb_actions,", "_masks, _u1, _u2 = [], [], [], [], [], [], [] for n,", "def train(obs, states, rewards, masks, u1, u2, values, summary=False): advs = rewards -", "for g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v] = g grads = [grad_dict[v]", "model.initial_state self.dones = [False for _ in range(nenv)] def update_obs(self, obs): # Do", "= tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True sess = tf.Session(config=config) nact = ac_space.n", "tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads = [pg - dg for pg, dg in zip(pg_grads,", "+= len(rewards) * 4 # FRAME STACK ave_r = np.mean(self.rewards) #print(\"Episode {}, Ave", "float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular() else: obs, states, rewards, masks,", "= mb_rewards.flatten() mb_actions = mb_actions.flatten() mb_values = mb_values.flatten() mb_masks = mb_masks.flatten() return mb_obs,", ">= 0 for e in episode_over]): actions, u1, u2, values, states = self.model.step(self.obs,", "np.mean(self.rewards) >= 195.:#195.: #if self._num_rollouts > 1000: logger.record_tabular(\"finished_in\", self._num_rollouts) 
logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END", "self.writer = tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads) lr", "out only played experience obs = obs[:episode_length] rewards = rewards[:episode_length] actions = actions[:episode_length]", "policy(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False) train_model = policy(sess, ob_space, ac_space, nenvs,", "= tf.concat([tf.reshape(g, [-1]) for g in pg_grads], 0) all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op =", "discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.gamma) mb_rewards[n] = rewards mb_rewards", "self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2) obs, rewards, dones, _ =", "_obs, _rewards, _actions, _values, _masks, _u1, _u2 = [], [], [], [], [],", "mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones,", "nenv self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack) self.obs = np.zeros((nenv, self.n_in*nstack)) obs = env.reset() self.update_obs(obs)", "np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:, -self.n_in:] = obs[:, :self.n_in] def run(self): mb_obs, mb_rewards, mb_actions,", "bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed) lr = args.lr vf_coef = args.vf_coef nenvs = env.num_envs", "total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, logdir=None, bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed)", "assert dones[-1] == True masks = masks[:episode_length] # discount the 
rewards rewards =", "= obs[:, :self.n_in] def run(self): mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[] mb_states", ") self.writer.add_summary(sum_str, self._step) else: policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [pg_loss, vf_loss,", "u1, u2, values) nseconds = time.time() - tstart fps = int((update * nbatch)", "tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p, [-1]) for p in pg_grads], 0) cv_grad_splits", "self.dones = [False for _ in range(nenv)] def update_obs(self, obs): # Do frame-stacking", "mb_u1.append(u1) mb_u2.append(u2) obs, rewards, dones, _ = self.env.step(actions) self.states = states self.dones =", "# Do frame-stacking here instead of the FrameStack wrapper to reduce # IPC", "[None]) LR = tf.placeholder(tf.float32, []) step_model = policy(sess, ob_space, ac_space, nenvs, 1, nstack,", "epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None): config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True sess", "self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) obs, rewards, dones, _ = self.env.step(actions) self.states =", "in pg_grads], 0) all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean = ema.average(all_policy_grads)", "gym import time import joblib import logging import numpy as np import tensorflow", "in grads: print(v.name, g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op = tf.summary.merge_all() self.writer =", "= time.time() - tstart fps = int((update * nbatch) / nseconds) ev =", "args.vf_coef nenvs = env.num_envs ob_space = env.observation_space ac_space = env.action_space num_procs = len(env.remotes)", "CnnPolicy from baselines.a2c.utils import 
cat_entropy, mse import random def gs(x): return x.get_shape().as_list() class", "> 1000: logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END = True return mb_obs, mb_states,", "dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] last_values = self.model.value(self.obs,", "all_policy_grads_sq]) em_mean = ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq) em_var = em_mean_sq - tf.square(em_mean) em_log_var", "pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p, [-1]) for p in pg_grads],", "else: policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [pg_loss, vf_loss, entropy, mlgv, _train],", "#update % log_interval == 0 or update == 1: obs, states, rewards, masks,", "mb_rewards, mb_masks, mb_actions, mb_values class RolloutRunner(Runner): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99):", "1) - train_model.vf_t) * dlogp_dpi) pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads = [pg", "episode_over[n] == -1: episode_over[n] = step self.update_obs(obs) mb_rewards.append(rewards) step += 1 mb_dones.append(self.dones) #", "= np.asarray(_values) mb_masks = np.asarray(_masks) mb_u1 = np.asarray(_u1) mb_u2 = np.asarray(_u2) self._num_rollouts +=", "u1, u2 = u1.tolist(), u2.tolist() # get length of this episode episode_length =", "sess.run( [self.sum_op, pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self.writer.add_summary(sum_str, self._step) else: policy_loss,", "self.env.step(actions) self.states = states self.dones = dones for n, done in enumerate(dones): if", "self.update_obs(obs) self.gamma = gamma self.nsteps = nsteps self.states = model.initial_state self.dones = [False", "= [-1 for i in range(self.nenv)] mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_u1, mb_u2", "len(rewards) * 4) 
#logger.dump_tabular() END = False #print(self._num_steps, len(rewards)) #if self._num_steps > 5000000:", "import CnnPolicy from baselines.a2c.utils import cat_entropy, mse import random def gs(x): return x.get_shape().as_list()", "- entropy * ent_coef + vf_loss * vf_coef grads = tf.gradients(loss, params) grads", "policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values, summary=False) nseconds =", "value) in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones = dones.tolist() if dones[-1]", "[-1]) for g in pg_grads], 0) all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq])", "actions = actions[:episode_length] values = values[:episode_length] dones = dones[:episode_length] u1 = u1[:episode_length] u2", "END def learn(policy, env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99,", "nenvs, nsteps, nstack, num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None):", "def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): self.env = env self.model = model", "sm = tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A * (1. - sm) + (1. 
-", "grad = -e_grad * ent_coef + p_grad policy_grads.append(grad) grad_dict = {} for g,", "self.save = save self.load = load tf.global_variables_initializer().run(session=sess) class Runner(object): def __init__(self, env, model,", "pg_grads, policy_params): grad = -e_grad * ent_coef + p_grad policy_grads.append(grad) grad_dict = {}", "params) grads = list(zip(grads, params)) ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads = tf.concat([tf.reshape(g, [-1]) for", "= dones for n, done in enumerate(dones): if done: self.obs[n] = self.obs[n] *", "mb_actions, mb_values class RolloutRunner(Runner): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): super().__init__(env, model,", "cv_grad_splits = tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits * vf_coef cv_grads = tf.gradients(vf_loss, vf_params) policy_grads", "\"pi\" in v.name] pg_grads = tf.gradients(pg_loss, policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss =", "ddiff_grads = tf.gradients(ddiff_loss, policy_params) sm = tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A * (1. -", "mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values = np.asarray(mb_values,", "discount_with_dones, jacobian from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables from baselines.a2c.policies import CnnPolicy from", "- sm) + (1. 
- oh_A) * (-sm) pi_grads = -((tf.expand_dims(R, 1) -", "= 0 self.rewards = [] def run(self): # reset env self.obs = np.zeros(self.obs.shape)", "as tf from baselines import logger from baselines.common import set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env", "ob_space, ac_space, nenvs, nsteps, nstack, reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi))", "masks if summary: sum_str, policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [self.sum_op, pg_loss,", "logger.record_tabular(\"l\", len(rewards) * 4) #logger.dump_tabular() END = False #print(self._num_steps, len(rewards)) #if self._num_steps >", "osp import gym import time import joblib import logging import numpy as np", "> 5000000: if np.mean(self.rewards) >= 195.:#195.: #if self._num_rollouts > 1000: logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\",", "* ent_coef + vf_loss * vf_coef grads = tf.gradients(loss, params) grads = list(zip(grads,", "0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values =", ">= 195.:#195.: #if self._num_rollouts > 1000: logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END =", "ps = sess.run(params) make_path(save_path) joblib.dump(ps, save_path) def load(load_path): loaded_params = joblib.load(load_path) restores =", "= step_model self.step = step_model.step self.value = step_model.value self.initial_state = step_model.initial_state self.save =", "u2, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2)", 
"zip(entropy_grads, pg_grads, policy_params): grad = -e_grad * ent_coef + p_grad policy_grads.append(grad) grad_dict =", "Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon,", "discount the rewards rewards = discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks)", "if np.mean(self.rewards) >= 195.:#195.: #if self._num_rollouts > 1000: logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular()", "# make numpy mb_obs = np.asarray(_obs) mb_rewards = np.asarray(_rewards) mb_actions = np.asarray(_actions) mb_values", "mb_states = self.states step = 0 while not all([e >= 0 for e", "import time import joblib import logging import numpy as np import tensorflow as", "policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [pg_loss, vf_loss, entropy, mlgv, _train], td_map", "mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2 =", "alpha=0.99, gamma=0.99, log_interval=100, logdir=None, bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed) lr = args.lr vf_coef =", "from baselines.a2c.policies import CnnPolicy from baselines.a2c.utils import cat_entropy, mse import random def gs(x):", "self.sum_op = tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train", "= save self.load = load tf.global_variables_initializer().run(session=sess) class Runner(object): def __init__(self, env, model, nsteps=5,", "# discount/bootstrap off value fn _obs, _rewards, _actions, _values, _masks, _u1, _u2 =", "logdir=logdir) runner = 
RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch = nenvs*nsteps tstart =", "[], [], [] for n, (obs, rewards, actions, values, dones, masks, u1, u2)", "mb_u1, mb_u2)): # pull out data rewards = rewards.tolist() self.rewards.append(sum(rewards)) actions = actions.tolist()", "from baselines.common import set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.atari_wrappers import wrap_deepmind", "= tf.gradients(vf_loss, vf_params) policy_grads = [] for e_grad, p_grad, param in zip(entropy_grads, pg_grads,", "u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_masks, mb_u1, mb_u2)): # pull out", "mb_dones.append(self.dones) obs, rewards, dones, _ = self.env.step(actions) self.states = states self.dones = dones", "= values.tolist() dones = dones.tolist() masks = masks.tolist() u1, u2 = u1.tolist(), u2.tolist()", "IPC overhead self.obs = np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:, -self.n_in:] = obs[:, :self.n_in] def", "= step self.update_obs(obs) mb_rewards.append(rewards) step += 1 mb_dones.append(self.dones) # batch of steps to", "value fn _obs, _rewards, _actions, _values, _masks, _u1, _u2 = [], [], [],", "= masks.tolist() u1, u2 = u1.tolist(), u2.tolist() # get length of this episode", "[], [], [], [], [], [], [] mb_states = self.states step = 0", "obs = env.reset() self.update_obs(obs) self.gamma = gamma self.nsteps = nsteps self.states = model.initial_state", "SubprocVecEnv from baselines.common.atari_wrappers import wrap_deepmind from baselines.a2c.utils import discount_with_dones, jacobian from baselines.a2c.utils import", "= tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads = [pg - dg for pg, dg in", "# reset env self.obs = np.zeros(self.obs.shape) obs = self.env.reset() self.update_obs(obs) # run env", "_values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards = 
self.rewards[-100:] # make numpy mb_obs = np.asarray(_obs)", "= rewards mb_rewards = mb_rewards.flatten() mb_actions = mb_actions.flatten() mb_values = mb_values.flatten() mb_masks =", "mb_u1, mb_u2, END def learn(policy, env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear',", "step in range(len(obs)): cur_lr = lr.value() td_map = { train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs,", "* vf_coef cv_grads = tf.gradients(vf_loss, vf_params) policy_grads = [] for e_grad, p_grad, param", "pg_grads = tf.gradients(pg_loss, policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss = pg_loss - entropy", "train(obs, states, rewards, masks, u1, u2, values, summary=False): advs = rewards - values", "= RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch = nenvs*nsteps tstart = time.time() for", "sess.run(restores) self.train = train self.train_model = train_model self.step_model = step_model self.step = step_model.step", "ema.average(all_policy_grads_sq) em_var = em_mean_sq - tf.square(em_mean) em_log_var = tf.log(em_var + 1e-20) mlgv =", "nseconds = time.time() - tstart fps = int((update * nbatch) / nseconds) env.close()", "tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R) if train_model.relaxed: pg_loss = tf.constant(0.0)", "alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None): config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True", "} if states != []: td_map[train_model.S] = states td_map[train_model.M] = masks if summary:", "gamma=0.99, log_interval=100, logdir=None, bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed) lr = args.lr vf_coef = args.vf_coef", 
"baselines.a2c.policies import CnnPolicy from baselines.a2c.utils import cat_entropy, mse import random def gs(x): return", "states td_map[train_model.M] = masks if summary: sum_str, policy_loss, value_loss, policy_entropy, lv, _ =", "= [],[],[],[],[] mb_states = self.states for n in range(self.nsteps): actions, values, states =", "mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape)", "= tf.one_hot(train_model.a0, ac_space.n) params = find_trainable_variables(\"model\") policy_params = [v for v in params", "self.n_in*nstack) self.obs = np.zeros((nenv, self.n_in*nstack)) obs = env.reset() self.update_obs(obs) self.gamma = gamma self.nsteps", "baselines import logger from baselines.common import set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from", "= step_model.step self.value = step_model.value self.initial_state = step_model.initial_state self.save = save self.load =", "nstack, num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None): config =", "[v for v in params if \"pi\" in v.name] vf_params = [v for", "mb_u2 = [], [], [], [], [], [], [] mb_states = self.states step", "R)) loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef grads", "train_model.vf) tf.summary.histogram(\"R\", R) if train_model.relaxed: pg_loss = tf.constant(0.0) oh_A = tf.one_hot(train_model.a0, ac_space.n) params", "nstack=4, gamma=0.99): super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts = 0 self._num_steps = 0", "mb_rewards = np.asarray(_rewards) mb_actions = np.asarray(_actions) mb_values = np.asarray(_values) mb_masks = np.asarray(_masks) mb_u1", "env.observation_space.shape nenv = env.num_envs self.nenv = nenv self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack) self.obs =", 
"pg_grads], 0) all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean = ema.average(all_policy_grads) em_mean_sq", ") self._step += 1 return policy_loss, value_loss, policy_entropy, lv def save(save_path): ps =", "vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None): config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs)", "policy_entropy, lv, _ = sess.run( [pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self._step", "from baselines.a2c.utils import cat_entropy, mse import random def gs(x): return x.get_shape().as_list() class Model(object):", "True return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_u1, mb_u2, END def learn(policy,", "self.states, self.dones).tolist() #discount/bootstrap off value fn for n, (rewards, dones, value) in enumerate(zip(mb_rewards,", "_ = sess.run( [self.sum_op, pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self.writer.add_summary(sum_str, self._step)", "u2, values, summary=False) nseconds = time.time() - tstart fps = int((update * nbatch)", "* ent_coef + p_grad policy_grads.append(grad) grad_dict = {} for g, v in list(zip(policy_grads,", "lr = args.lr vf_coef = args.vf_coef nenvs = env.num_envs ob_space = env.observation_space ac_space", "def run(self): mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[] mb_states = self.states for", "float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular() else: obs, states, rewards, masks, actions, values,", "if done: self.obs[n] = self.obs[n] * 0 if episode_over[n] == -1: episode_over[n] =", "actions, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) 
mb_dones.append(self.dones) obs, rewards,", "+ vf_loss * vf_coef grads = tf.gradients(loss, params) grads = list(zip(grads, params)) ema", "max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner = RolloutRunner(env, model, nsteps=nsteps, nstack=nstack,", "obs = obs[:episode_length] rewards = rewards[:episode_length] actions = actions[:episode_length] values = values[:episode_length] dones", "params if \"pi\" in v.name] vf_params = [v for v in params if", "nact = ac_space.n nbatch = nenvs*nsteps ADV = tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32,", "__init__(self, policy, ob_space, ac_space, nenvs, nsteps, nstack, num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99,", "p_grad policy_grads.append(grad) grad_dict = {} for g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v]", "v.name] pg_grads = tf.gradients(pg_loss, policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss = pg_loss -", "= ac_space.n nbatch = nenvs*nsteps ADV = tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32, [None])", "make_path, find_trainable_variables from baselines.a2c.policies import CnnPolicy from baselines.a2c.utils import cat_entropy, mse import random", "lrschedule='linear', logdir=None): config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True sess = tf.Session(config=config)", "195.:#195.: #if self._num_rollouts > 1000: logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END = True", "ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p, [-1]) for p in", "= True sess = tf.Session(config=config) nact = ac_space.n 
nbatch = nenvs*nsteps ADV =", "LR:cur_lr } if states != []: td_map[train_model.S] = states td_map[train_model.M] = masks if", "policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values) nseconds", "step_model self.step = step_model.step self.value = step_model.value self.initial_state = step_model.initial_state self.save = save", "masks, actions, values, u1, u2, END = runner.run() if END: break policy_loss, value_loss,", "ent_coef + vf_loss * vf_coef grads = tf.gradients(loss, params) grads = list(zip(grads, params))", "import Scheduler, make_path, find_trainable_variables from baselines.a2c.policies import CnnPolicy from baselines.a2c.utils import cat_entropy, mse", "def gs(x): return x.get_shape().as_list() class Model(object): def __init__(self, policy, ob_space, ac_space, nenvs, nsteps,", "+= 1 mb_dones.append(self.dones) # batch of steps to batch of rollouts mb_obs =", "True sess = tf.Session(config=config) nact = ac_space.n nbatch = nenvs*nsteps ADV = tf.placeholder(tf.float32,", "states, rewards, masks, actions, values, u1, u2, END = runner.run() if END: break", "explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular() else: obs,", "policy_entropy, lv def save(save_path): ps = sess.run(params) make_path(save_path) joblib.dump(ps, save_path) def load(load_path): loaded_params", "dg in zip(pg_grads, ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p, [-1])", "[] mb_states = self.states step = 0 while not all([e >= 0 for", "mb_u2.append(u2) obs, rewards, dones, _ = self.env.step(actions) self.states = states self.dones = dones", "__init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): self.env = env self.model = model 
self.n_in,", "for v in params if \"vf\" in v.name] entropy_grads = tf.gradients(entropy, policy_params) ddiff_loss", "= (nenv*nsteps, self.n_in*nstack) self.obs = np.zeros((nenv, self.n_in*nstack)) obs = env.reset() self.update_obs(obs) self.gamma =", "self.nsteps = nsteps self.states = model.initial_state self.dones = [False for _ in range(nenv)]", "self._num_steps) logger.dump_tabular() END = True return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_u1,", "0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:,", "nstack, reuse=False) train_model = policy(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True) neglogpac =", "masks = masks[:episode_length] # discount the rewards rewards = discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs)", "lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, logdir=None, bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed) lr = args.lr", "= [pg - dg for pg, dg in zip(pg_grads, ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi,", "dones, _ = self.env.step(actions) self.states = states self.dones = dones for n, done", "import tensorflow as tf from baselines import logger from baselines.common import set_global_seeds, explained_variance", "0) # discount/bootstrap off value fn _obs, _rewards, _actions, _values, _masks, _u1, _u2", "mb_rewards, mb_actions, mb_values, mb_dones, mb_u1, mb_u2 = [], [], [], [], [], [],", "= nenvs*nsteps ADV = tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32,", "np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values", "= np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] last_values", "of 
rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions =", "masks, u1, u2, values, summary=False) nseconds = time.time() - tstart fps = int((update", "= env.num_envs self.nenv = nenv self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack) self.obs = np.zeros((nenv, self.n_in*nstack))", "(nenv*nsteps, self.n_in*nstack) self.obs = np.zeros((nenv, self.n_in*nstack)) obs = env.reset() self.update_obs(obs) self.gamma = gamma", "x.get_shape().as_list() class Model(object): def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, nstack, num_procs, ent_coef=0.01,", "= policy(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy", "= True return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_u1, mb_u2, END def", "episode_over[n] = step self.update_obs(obs) mb_rewards.append(rewards) step += 1 mb_dones.append(self.dones) # batch of steps", "np.zeros((nenv, self.n_in*nstack)) obs = env.reset() self.update_obs(obs) self.gamma = gamma self.nsteps = nsteps self.states", "model, nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts = 0 self._num_steps = 0 self.rewards = []", "ob_space = env.observation_space ac_space = env.action_space num_procs = len(env.remotes) # HACK model =", "to reduce # IPC overhead self.obs = np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:, -self.n_in:] =", "self.obs = np.zeros((nenv, self.n_in*nstack)) obs = env.reset() self.update_obs(obs) self.gamma = gamma self.nsteps =", "model.train(obs, states, rewards, masks, u1, u2, values) nseconds = time.time() - tstart fps", "tstart fps = int((update * nbatch) / nseconds) env.close() if __name__ == '__main__':", "= dones for n, done in enumerate(dones): if done: self.obs[n] = self.obs[n]*0 self.update_obs(obs)", "g grads = [grad_dict[v] for v in 
params] print(grads) else: pg_loss = tf.reduce_sum((tf.stop_gradient(R)", "self.load = load tf.global_variables_initializer().run(session=sess) class Runner(object): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99):", "print(grads) else: pg_loss = tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) * neglogpac) policy_params = [v for", "= cv_grad_splits * vf_coef cv_grads = tf.gradients(vf_loss, vf_params) policy_grads = [] for e_grad,", "Scheduler, make_path, find_trainable_variables from baselines.a2c.policies import CnnPolicy from baselines.a2c.utils import cat_entropy, mse import", "import SubprocVecEnv from baselines.common.atari_wrappers import wrap_deepmind from baselines.a2c.utils import discount_with_dones, jacobian from baselines.a2c.utils", "u1, u2, END = runner.run() if END: break policy_loss, value_loss, policy_entropy, lv =", "= self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) obs, rewards, dones, _ =", "== 0 or update == 1: obs, states, rewards, masks, actions, values, u1,", "enumerate(zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones = dones.tolist() if dones[-1] == 0:", "self.gamma = gamma self.nsteps = nsteps self.states = model.initial_state self.dones = [False for", "_values, _masks, _u1, _u2 = [], [], [], [], [], [], [] for", "= dones.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else: rewards", "return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values class RolloutRunner(Runner): def __init__(self, env, model,", "_actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards = self.rewards[-100:] # make numpy mb_obs =", "values, dones, masks, u1, u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_masks, mb_u1,", "pi_grads = 
-((tf.expand_dims(R, 1) - train_model.vf_t) * dlogp_dpi) pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads)", "tf.gradients(ddiff_loss, policy_params) sm = tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A * (1. - sm) +", "nenvs, 1, nstack, reuse=False) train_model = policy(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True)", "dones[:episode_length] u1 = u1[:episode_length] u2 = u2[:episode_length] assert dones[-1] == True masks =", "states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2) obs, rewards,", "= step_model.initial_state self.save = save self.load = load tf.global_variables_initializer().run(session=sess) class Runner(object): def __init__(self,", "= False #print(self._num_steps, len(rewards)) #if self._num_steps > 5000000: if np.mean(self.rewards) >= 195.:#195.: #if", "tf.reduce_mean(em_log_var) for g, v in grads: print(v.name, g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op", "self.update_obs(obs) # run env until all threads finish episode_over = [-1 for i", "4) #logger.dump_tabular() END = False #print(self._num_steps, len(rewards)) #if self._num_steps > 5000000: if np.mean(self.rewards)", "update == 1: obs, states, rewards, masks, actions, values, u1, u2, END =", "steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1,", "make_path(save_path) joblib.dump(ps, save_path) def load(load_path): loaded_params = joblib.load(load_path) restores = [] for p,", "step_model.step self.value = step_model.value self.initial_state = step_model.initial_state self.save = save self.load = load", "= env.observation_space.shape nenv = env.num_envs self.nenv = nenv self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack) self.obs", "env, model, nsteps=5, nstack=4, gamma=0.99): 
super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts = 0", "= lr.value() td_map = { train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr } if", "nvalues=total_timesteps, schedule=lrschedule) self._step = 0 def train(obs, states, rewards, masks, u1, u2, values,", "mb_rewards = mb_rewards.flatten() mb_actions = mb_actions.flatten() mb_values = mb_values.flatten() mb_masks = mb_masks.flatten() return", "mb_dones[:, 1:] mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) #", "grads = [grad_dict[v] for v in params] print(grads) else: pg_loss = tf.reduce_sum((tf.stop_gradient(R) -", "Runner(object): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): self.env = env self.model =", "logger from baselines.common import set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.atari_wrappers import", "trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps,", "rewards, masks, u1, u2, values) nseconds = time.time() - tstart fps = int((update", "em_mean_sq = ema.average(all_policy_grads_sq) em_var = em_mean_sq - tf.square(em_mean) em_log_var = tf.log(em_var + 1e-20)", "self.dones = dones for n, done in enumerate(dones): if done: self.obs[n] = self.obs[n]", "self.nenv = nenv self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack) self.obs = np.zeros((nenv, self.n_in*nstack)) obs =", "step self.update_obs(obs) mb_rewards.append(rewards) step += 1 mb_dones.append(self.dones) # batch of steps to batch", "self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards) * 4) #logger.dump_tabular() END = False #print(self._num_steps, len(rewards))", "= np.asarray(_u1) mb_u2 = 
np.asarray(_u2) self._num_rollouts += 1 self._num_steps += len(rewards) * 4", "u1, u2, values, summary=False): advs = rewards - values for step in range(len(obs)):", "= nsteps self.states = model.initial_state self.dones = [False for _ in range(nenv)] def", "= np.asarray(_actions) mb_values = np.asarray(_values) mb_masks = np.asarray(_masks) mb_u1 = np.asarray(_u1) mb_u2 =", "fps = int((update * nbatch) / nseconds) env.close() if __name__ == '__main__': main()", "vf_coef = args.vf_coef nenvs = env.num_envs ob_space = env.observation_space ac_space = env.action_space num_procs", "enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_masks, mb_u1, mb_u2)): # pull out data rewards", "ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean = ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq) em_var = em_mean_sq - tf.square(em_mean)", "time.time() - tstart fps = int((update * nbatch) / nseconds) env.close() if __name__", "LR = tf.placeholder(tf.float32, []) step_model = policy(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False)", "if dones[-1] == 0: rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else: rewards = discount_with_dones(rewards,", "dones.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else: rewards =", "params if \"vf\" in v.name] entropy_grads = tf.gradients(entropy, policy_params) ddiff_loss = tf.reduce_sum(train_model.vf -", "values[:episode_length] dones = dones[:episode_length] u1 = u1[:episode_length] u2 = u2[:episode_length] assert dones[-1] ==", "vf_loss, entropy, mlgv, _train], td_map ) self._step += 1 return policy_loss, value_loss, policy_entropy,", "vf_loss, entropy, mlgv, _train], td_map ) self.writer.add_summary(sum_str, self._step) else: policy_loss, value_loss, policy_entropy, lv,", "np import tensorflow as tf from baselines import logger from baselines.common import set_global_seeds,", "sess = 
tf.Session(config=config) nact = ac_space.n nbatch = nenvs*nsteps ADV = tf.placeholder(tf.float32, [None])", "em_mean = ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq) em_var = em_mean_sq - tf.square(em_mean) em_log_var =", "step_model = policy(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False) train_model = policy(sess, ob_space,", "nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir)", "else: obs, states, rewards, masks, actions, values, u1, u2, END = runner.run() if", "as osp import gym import time import joblib import logging import numpy as", "mb_actions, mb_values, mb_dones = [],[],[],[],[] mb_states = self.states for n in range(self.nsteps): actions,", "ent_coef + p_grad policy_grads.append(grad) grad_dict = {} for g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads,", "= [v for v in params if \"vf\" in v.name] entropy_grads = tf.gradients(entropy,", "range(self.nenv)] mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_u1, mb_u2 = [], [], [], [],", "nsteps, nstack, num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None): config", "vf_params) policy_grads = [] for e_grad, p_grad, param in zip(entropy_grads, pg_grads, policy_params): grad", "rewards rewards = discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2)", "mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2) obs, rewards, dones, _ = self.env.step(actions) self.states = states self.dones", "policy_params): grad = -e_grad * ent_coef + p_grad policy_grads.append(grad) grad_dict = {} for", "in pg_grads], 0) cv_grad_splits = 
tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits * vf_coef cv_grads =", "cur_lr = lr.value() td_map = { train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr }", "grad_dict[v] = g grads = [grad_dict[v] for v in params] print(grads) else: pg_loss", "enumerate(dones): if done: self.obs[n] = self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of steps to", "np.asarray(_masks) mb_u1 = np.asarray(_u1) mb_u2 = np.asarray(_u2) self._num_rollouts += 1 self._num_steps += len(rewards)", "entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R) if train_model.relaxed: pg_loss", "= rewards[:episode_length] actions = actions[:episode_length] values = values[:episode_length] dones = dones[:episode_length] u1 =", "mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1,", "ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq) em_var = em_mean_sq - tf.square(em_mean) em_log_var = tf.log(em_var +", "self.model.value(self.obs, self.states, self.dones).tolist() #discount/bootstrap off value fn for n, (rewards, dones, value) in", "apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean = ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq) em_var = em_mean_sq", "mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2) obs, rewards, dones, _ = self.env.step(actions) self.states =", "def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, nstack, num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,", "self.rewards = self.rewards[-100:] # make numpy mb_obs = np.asarray(_obs) mb_rewards = 
np.asarray(_rewards) mb_actions", "mb_masks, mb_u1, mb_u2)): # pull out data rewards = rewards.tolist() self.rewards.append(sum(rewards)) actions =", "max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None): config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth", "logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END = True return mb_obs, mb_states, mb_rewards, mb_masks,", "mb_rewards.append(rewards) step += 1 mb_dones.append(self.dones) # batch of steps to batch of rollouts", "logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END = True return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values,", "all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean = ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq)", "alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner = RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch", "episode episode_length = episode_over[n]+1 # crop out only played experience obs = obs[:episode_length]", "<gh_stars>10-100 import os.path as osp import gym import time import joblib import logging", "* 4) #logger.dump_tabular() END = False #print(self._num_steps, len(rewards)) #if self._num_steps > 5000000: if", "{ train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr } if states != []: td_map[train_model.S]", "= time.time() for update in range(1, total_timesteps//nbatch+1): if True: #update % log_interval ==", "experience obs = obs[:episode_length] rewards = rewards[:episode_length] actions = actions[:episode_length] values = values[:episode_length]", 
"list(zip(grads, params)) ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads = tf.concat([tf.reshape(g, [-1]) for g in pg_grads],", "self.train = train self.train_model = train_model self.step_model = step_model self.step = step_model.step self.value", "run env until all threads finish episode_over = [-1 for i in range(self.nenv)]", "train_model = policy(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0)", "update_obs(self, obs): # Do frame-stacking here instead of the FrameStack wrapper to reduce", "4 # FRAME STACK ave_r = np.mean(self.rewards) #print(\"Episode {}, Ave R {}\".format(self._num_rollouts, ave_r))", "all_policy_grads = tf.concat([tf.reshape(g, [-1]) for g in pg_grads], 0) all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op", "mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_u1, mb_u2, END def learn(policy, env, seed,", "= tf.gradients(pg_loss, policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss = pg_loss - entropy *", "= list(zip(grads, params)) ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads = tf.concat([tf.reshape(g, [-1]) for g in", "tf.gradients(vf_loss, vf_params) policy_grads = [] for e_grad, p_grad, param in zip(entropy_grads, pg_grads, policy_params):", "policy_params, grad_ys=pi_grads) pg_grads = [pg - dg for pg, dg in zip(pg_grads, ddiff_grads)]", "== 0: rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.gamma)", "_train = trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) self._step = 0 def train(obs,", "= [] for e_grad, p_grad, param in zip(entropy_grads, pg_grads, policy_params): grad = -e_grad", "= [grad_dict[v] for v in params] print(grads) else: pg_loss = tf.reduce_sum((tf.stop_gradient(R) - 
tf.stop_gradient(train_model.vf))", "[], [], [], [] for n, (obs, rewards, actions, values, dones, masks, u1,", "tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op = tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR,", "np.asarray(_values) mb_masks = np.asarray(_masks) mb_u1 = np.asarray(_u1) mb_u2 = np.asarray(_u2) self._num_rollouts += 1", "v.name] vf_params = [v for v in params if \"vf\" in v.name] entropy_grads", "train_model.vf_t) * dlogp_dpi) pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads = [pg - dg", "-self.n_in:] = obs[:, :self.n_in] def run(self): mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]", "self.env.reset() self.update_obs(obs) # run env until all threads finish episode_over = [-1 for", "self.dones).tolist() #discount/bootstrap off value fn for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones,", "self.dones = dones for n, done in enumerate(dones): if done: self.obs[n] = self.obs[n]*0", "policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v] = g grads = [grad_dict[v] for v in params] print(grads)", "import set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.atari_wrappers import wrap_deepmind from baselines.a2c.utils", "= nenvs*nsteps tstart = time.time() for update in range(1, total_timesteps//nbatch+1): if True: #update", "mb_u1 = np.asarray(_u1) mb_u2 = np.asarray(_u2) self._num_rollouts += 1 self._num_steps += len(rewards) *", "= rewards.tolist() self.rewards.append(sum(rewards)) actions = actions.tolist() values = values.tolist() dones = dones.tolist() masks", "logger.dump_tabular() else: obs, states, rewards, masks, actions, values, u1, u2, END = runner.run()", "lv, _ = sess.run( [pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self._step +=", "gamma=0.99): self.env = env self.model = model 
self.n_in, = env.observation_space.shape nenv = env.num_envs", "to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)", "model.train(obs, states, rewards, masks, u1, u2, values, summary=False) nseconds = time.time() - tstart", "states, rewards, masks, u1, u2, values, summary=False): advs = rewards - values for", "= model.train(obs, states, rewards, masks, u1, u2, values, summary=False) nseconds = time.time() -", "in params if \"vf\" in v.name] entropy_grads = tf.gradients(entropy, policy_params) ddiff_loss = tf.reduce_sum(train_model.vf", "-((tf.expand_dims(R, 1) - train_model.vf_t) * dlogp_dpi) pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads =", "_u2 = [], [], [], [], [], [], [] for n, (obs, rewards,", "__init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts =", "= gamma self.nsteps = nsteps self.states = model.initial_state self.dones = [False for _", "for n in range(self.nsteps): actions, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions)", "= np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1,", "tf.gradients(pg_loss, policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss = pg_loss - entropy * ent_coef", "= np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1,", "logger.dump_tabular() END = True return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_u1, mb_u2,", "mb_dones = [],[],[],[],[] mb_states = self.states for n in range(self.nsteps): actions, 
values, states", "mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) # discount/bootstrap off", "for pg, dg in zip(pg_grads, ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads =", "dones = dones.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else:", "joblib import logging import numpy as np import tensorflow as tf from baselines", "mb_dones, mb_masks, mb_u1, mb_u2)): # pull out data rewards = rewards.tolist() self.rewards.append(sum(rewards)) actions", "import os.path as osp import gym import time import joblib import logging import", "rewards = rewards[:episode_length] actions = actions[:episode_length] values = values[:episode_length] dones = dones[:episode_length] u1", "policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss = pg_loss - entropy * ent_coef +", "in enumerate(dones): if done: self.obs[n] = self.obs[n] * 0 if episode_over[n] == -1:", "ob_space, ac_space, nenvs, 1, nstack, reuse=False) train_model = policy(sess, ob_space, ac_space, nenvs, nsteps,", "tf.one_hot(train_model.a0, ac_space.n) params = find_trainable_variables(\"model\") policy_params = [v for v in params if", "nenvs*nsteps tstart = time.time() for update in range(1, total_timesteps//nbatch+1): if True: #update %", "beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) self._step =", "np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) # discount/bootstrap off value fn _obs, _rewards, _actions, _values, _masks,", "load(load_path): loaded_params = joblib.load(load_path) restores = [] for p, loaded_p in zip(params, loaded_params):", "tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32, []) step_model = policy(sess, ob_space, ac_space, nenvs, 1,", "mb_actions = 
mb_actions.flatten() mb_values = mb_values.flatten() mb_masks = mb_masks.flatten() return mb_obs, mb_states, mb_rewards,", "update in range(1, total_timesteps//nbatch+1): if True: #update % log_interval == 0 or update", "0) cv_grad_splits = tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits * vf_coef cv_grads = tf.gradients(vf_loss, vf_params)", "= joblib.load(load_path) restores = [] for p, loaded_p in zip(params, loaded_params): restores.append(p.assign(loaded_p)) ps", "save(save_path): ps = sess.run(params) make_path(save_path) joblib.dump(ps, save_path) def load(load_path): loaded_params = joblib.load(load_path) restores", "mb_actions.flatten() mb_values = mb_values.flatten() mb_masks = mb_masks.flatten() return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions,", "mb_u1, mb_u2 = [], [], [], [], [], [], [] mb_states = self.states", "obs[:, :self.n_in] def run(self): mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[] mb_states =", "dtype=np.float32).swapaxes(1, 0) # discount/bootstrap off value fn _obs, _rewards, _actions, _values, _masks, _u1,", "v in params] print(grads) else: pg_loss = tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) * neglogpac) policy_params", "obs[:episode_length] rewards = rewards[:episode_length] actions = actions[:episode_length] values = values[:episode_length] dones = dones[:episode_length]", "states != []: td_map[train_model.S] = states td_map[train_model.M] = masks if summary: sum_str, policy_loss,", "v in params if \"pi\" in v.name] pg_grads = tf.gradients(pg_loss, policy_params) vf_loss =", "ac_space, nenvs, 1, nstack, reuse=False) train_model = policy(sess, ob_space, ac_space, nenvs, nsteps, nstack,", "vf_params = [v for v in params if \"vf\" in v.name] entropy_grads =", "n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones =", "= {} for g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): 
grad_dict[v] = g grads", "!= []: td_map[train_model.S] = states td_map[train_model.M] = masks if summary: sum_str, policy_loss, value_loss,", "lv) logger.dump_tabular() else: obs, states, rewards, masks, actions, values, u1, u2, END =", "if \"pi\" in v.name] vf_params = [v for v in params if \"vf\"", "advs = rewards - values for step in range(len(obs)): cur_lr = lr.value() td_map", "-1: episode_over[n] = step self.update_obs(obs) mb_rewards.append(rewards) step += 1 mb_dones.append(self.dones) # batch of", "#batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards =", "logdir=None): config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True sess = tf.Session(config=config) nact", "True masks = masks[:episode_length] # discount the rewards rewards = discount_with_dones(rewards, dones, self.gamma)", "* neglogpac) policy_params = [v for v in params if \"pi\" in v.name]", "actions[:episode_length] values = values[:episode_length] dones = dones[:episode_length] u1 = u1[:episode_length] u2 = u2[:episode_length]", "lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) self._step = 0 def train(obs, states, rewards, masks,", "policy(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy =", "import logger from baselines.common import set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.atari_wrappers", "# HACK model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef,", "train_model.relaxed: pg_loss = tf.constant(0.0) oh_A = tf.one_hot(train_model.a0, ac_space.n) params = 
find_trainable_variables(\"model\") policy_params =", "policy_grads.append(grad) grad_dict = {} for g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v] =", "env until all threads finish episode_over = [-1 for i in range(self.nenv)] mb_obs,", "RolloutRunner(Runner): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma)", "ADV = tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32, []) step_model", "print(v.name, g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op = tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir) trainer", "self._num_steps = 0 self.rewards = [] def run(self): # reset env self.obs =", "values.tolist() dones = dones.tolist() masks = masks.tolist() u1, u2 = u1.tolist(), u2.tolist() #", "policy, ob_space, ac_space, nenvs, nsteps, nstack, num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5,", "episode_over = [-1 for i in range(self.nenv)] mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_u1,", "discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards = self.rewards[-100:]", "p, loaded_p in zip(params, loaded_params): restores.append(p.assign(loaded_p)) ps = sess.run(restores) self.train = train self.train_model", "np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)", "mb_masks, mb_actions, mb_values, mb_u1, mb_u2, END def learn(policy, env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6),", "/ nseconds) ev = explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", 
float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\",", "= env.num_envs ob_space = env.observation_space ac_space = env.action_space num_procs = len(env.remotes) # HACK", "em_var = em_mean_sq - tf.square(em_mean) em_log_var = tf.log(em_var + 1e-20) mlgv = tf.reduce_mean(em_log_var)", "R {}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards) * 4)", "= runner.run() if END: break policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards,", "self._step) else: policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [pg_loss, vf_loss, entropy, mlgv,", "{}, Ave R {}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards)", "pg_grads = [pg - dg for pg, dg in zip(pg_grads, ddiff_grads)] pi_param_grads =", "* (1. - sm) + (1. 
- oh_A) * (-sm) pi_grads = -((tf.expand_dims(R,", "= u1.tolist(), u2.tolist() # get length of this episode episode_length = episode_over[n]+1 #", "sum_str, policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [self.sum_op, pg_loss, vf_loss, entropy, mlgv,", "= states self.dones = dones for n, done in enumerate(dones): if done: self.obs[n]", "- tstart fps = int((update * nbatch) / nseconds) ev = explained_variance(values, rewards)", "dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones", "float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular() else: obs, states, rewards, masks, actions, values, u1, u2,", "mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0)", "= len(env.remotes) # HACK model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs,", "pg_loss = tf.constant(0.0) oh_A = tf.one_hot(train_model.a0, ac_space.n) params = find_trainable_variables(\"model\") policy_params = [v", "0 def train(obs, states, rewards, masks, u1, u2, values, summary=False): advs = rewards", "zip(pg_grads, ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p, [-1]) for p", "_masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards = self.rewards[-100:] # make numpy mb_obs = np.asarray(_obs) mb_rewards", "self.n_in, = env.observation_space.shape nenv = env.num_envs self.nenv = nenv self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack)", "= mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] last_values = self.model.value(self.obs, self.states, self.dones).tolist() #discount/bootstrap", "# pull out data rewards = rewards.tolist() self.rewards.append(sum(rewards)) actions = actions.tolist() values =", "= Scheduler(v=lr, 
nvalues=total_timesteps, schedule=lrschedule) self._step = 0 def train(obs, states, rewards, masks, u1,", "from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.atari_wrappers import wrap_deepmind from baselines.a2c.utils import discount_with_dones, jacobian", "= self.env.step(actions) self.states = states self.dones = dones for n, done in enumerate(dones):", "logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular() else: obs, states, rewards, masks, actions, values, u1,", "mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values class RolloutRunner(Runner): def __init__(self, env, model, nsteps=5,", "= 0 while not all([e >= 0 for e in episode_over]): actions, u1,", "mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:]", "for p in pg_grads], 0) cv_grad_splits = tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits * vf_coef", "of this episode episode_length = episode_over[n]+1 # crop out only played experience obs", "super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts = 0 self._num_steps = 0 self.rewards =", "e in episode_over]): actions, u1, u2, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs))", "v in params if \"vf\" in v.name] entropy_grads = tf.gradients(entropy, policy_params) ddiff_loss =", "u2 = u1.tolist(), u2.tolist() # get length of this episode episode_length = episode_over[n]+1", "mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[] mb_states = self.states for n in range(self.nsteps):", "p_grad, param in zip(entropy_grads, pg_grads, policy_params): grad = -e_grad * ent_coef + p_grad", "rewards, masks, u1, u2, values, summary=False): advs = rewards - values for step", "= [], [], [], [], [], [], [] for n, (obs, rewards, actions,", "logger.record_tabular(\"log_variance\", lv) 
logger.dump_tabular() else: obs, states, rewards, masks, actions, values, u1, u2, END", "vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner = RolloutRunner(env, model, nsteps=nsteps,", "env.action_space num_procs = len(env.remotes) # HACK model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps,", "np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)", "params if \"pi\" in v.name] pg_grads = tf.gradients(pg_loss, policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R))", "grads = list(zip(grads, params)) ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads = tf.concat([tf.reshape(g, [-1]) for g", "self.obs[n] = self.obs[n] * 0 if episode_over[n] == -1: episode_over[n] = step self.update_obs(obs)", "logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards) * 4) #logger.dump_tabular() END =", "np.asarray(_u1) mb_u2 = np.asarray(_u2) self._num_rollouts += 1 self._num_steps += len(rewards) * 4 #", "in range(nenv)] def update_obs(self, obs): # Do frame-stacking here instead of the FrameStack", "for v in params if \"pi\" in v.name] pg_grads = tf.gradients(pg_loss, policy_params) vf_loss", "[pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self._step += 1 return policy_loss, value_loss,", "values for step in range(len(obs)): cur_lr = lr.value() td_map = { train_model.X:obs, train_model.U1:u1,", "rewards = discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards", "lv, _ = sess.run( [self.sum_op, 
pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self.writer.add_summary(sum_str,", "step = 0 while not all([e >= 0 for e in episode_over]): actions,", "tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss, policy_params) sm = tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A", "= masks[:episode_length] # discount the rewards rewards = discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards)", "num_procs = len(env.remotes) # HACK model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack,", "lr.value() td_map = { train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr } if states", ":-1] mb_dones = mb_dones[:, 1:] mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2 = np.asarray(mb_u2,", "pg_grads], 0) cv_grad_splits = tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits * vf_coef cv_grads = tf.gradients(vf_loss,", "* 4 # FRAME STACK ave_r = np.mean(self.rewards) #print(\"Episode {}, Ave R {}\".format(self._num_rollouts,", "0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values =", "self.gamma) mb_rewards[n] = rewards mb_rewards = mb_rewards.flatten() mb_actions = mb_actions.flatten() mb_values = mb_values.flatten()", "- values for step in range(len(obs)): cur_lr = lr.value() td_map = { train_model.X:obs,", "ave_r = np.mean(self.rewards) #print(\"Episode {}, Ave R {}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1])", "ps = sess.run(restores) self.train = train self.train_model = train_model self.step_model = step_model self.step", "for p, loaded_p in zip(params, loaded_params): restores.append(p.assign(loaded_p)) ps = sess.run(restores) self.train = train", "# run env until all threads finish episode_over = [-1 for i in", "_u1, 
_u2 = [], [], [], [], [], [], [] for n, (obs,", "cv_grads = tf.concat([tf.reshape(p, [-1]) for p in pg_grads], 0) cv_grad_splits = tf.reduce_sum(tf.square(cv_grads)) vf_loss", "END = False #print(self._num_steps, len(rewards)) #if self._num_steps > 5000000: if np.mean(self.rewards) >= 195.:#195.:", "ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads = tf.concat([tf.reshape(g, [-1]) for g in pg_grads], 0) all_policy_grads_sq", "nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts = 0 self._num_steps = 0 self.rewards = [] def", "self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1,", "tf.stop_gradient(train_model.vf)) * neglogpac) policy_params = [v for v in params if \"pi\" in", "done in enumerate(dones): if done: self.obs[n] = self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of", "tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) self._step", "- tf.stop_gradient(train_model.vf)) * neglogpac) policy_params = [v for v in params if \"pi\"", "summary: sum_str, policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [self.sum_op, pg_loss, vf_loss, entropy,", "mb_u2 = np.asarray(_u2) self._num_rollouts += 1 self._num_steps += len(rewards) * 4 # FRAME", "crop out only played experience obs = obs[:episode_length] rewards = rewards[:episode_length] actions =", "pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads = [pg - dg for pg, dg", "env, model, nsteps=5, nstack=4, gamma=0.99): self.env = env self.model = model self.n_in, =", "values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) 
mb_u1.append(u1) mb_u2.append(u2) obs,", "tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) * neglogpac) policy_params = [v for v in params if", "def save(save_path): ps = sess.run(params) make_path(save_path) joblib.dump(ps, save_path) def load(load_path): loaded_params = joblib.load(load_path)", "= obs[:episode_length] rewards = rewards[:episode_length] actions = actions[:episode_length] values = values[:episode_length] dones =", "actions.tolist() values = values.tolist() dones = dones.tolist() masks = masks.tolist() u1, u2 =", "values = values[:episode_length] dones = dones[:episode_length] u1 = u1[:episode_length] u2 = u2[:episode_length] assert", "np.asarray(_obs) mb_rewards = np.asarray(_rewards) mb_actions = np.asarray(_actions) mb_values = np.asarray(_values) mb_masks = np.asarray(_masks)", "len(env.remotes) # HACK model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef,", "for n, done in enumerate(dones): if done: self.obs[n] = self.obs[n] * 0 if", "entropy, mlgv, _train], td_map ) self.writer.add_summary(sum_str, self._step) else: policy_loss, value_loss, policy_entropy, lv, _", "self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards = self.rewards[-100:] # make", "fn _obs, _rewards, _actions, _values, _masks, _u1, _u2 = [], [], [], [],", "done: self.obs[n] = self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of steps to batch of", "model, nsteps=5, nstack=4, gamma=0.99): super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) self._num_rollouts = 0 self._num_steps", "mb_obs = np.asarray(mb_obs).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1,", "vf_loss = 
tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss = pg_loss - entropy * ent_coef + vf_loss", "+= 1 return policy_loss, value_loss, policy_entropy, lv def save(save_path): ps = sess.run(params) make_path(save_path)", "mb_masks, mb_actions, mb_values class RolloutRunner(Runner): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): super().__init__(env,", "_train], td_map ) self.writer.add_summary(sum_str, self._step) else: policy_loss, value_loss, policy_entropy, lv, _ = sess.run(", "for n, (obs, rewards, actions, values, dones, masks, u1, u2) in enumerate(zip(mb_obs, mb_rewards,", "= tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean = ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq) em_var", "env.observation_space ac_space = env.action_space num_procs = len(env.remotes) # HACK model = Model(policy=policy, ob_space=ob_space,", "break policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values,", "- oh_A) * (-sm) pi_grads = -((tf.expand_dims(R, 1) - train_model.vf_t) * dlogp_dpi) pg_grads", "reset env self.obs = np.zeros(self.obs.shape) obs = self.env.reset() self.update_obs(obs) # run env until", "seed, nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, logdir=None, bootstrap=False,", "pg_loss = tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) * neglogpac) policy_params = [v for v in", "= discount_with_dones(rewards, dones, self.gamma) mb_rewards[n] = rewards mb_rewards = mb_rewards.flatten() mb_actions = mb_actions.flatten()", "#if self._num_rollouts > 1000: logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END = True return", "params)) ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads = 
tf.concat([tf.reshape(g, [-1]) for g in pg_grads], 0)", "obs = self.env.reset() self.update_obs(obs) # run env until all threads finish episode_over =", "= np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) # discount/bootstrap off value fn _obs, _rewards, _actions, _values,", "env self.obs = np.zeros(self.obs.shape) obs = self.env.reset() self.update_obs(obs) # run env until all", "n in range(self.nsteps): actions, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values)", "pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self.writer.add_summary(sum_str, self._step) else: policy_loss, value_loss, policy_entropy,", "grad_dict = {} for g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v] = g", "= np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) # discount/bootstrap off value", "1: obs, states, rewards, masks, actions, values, u1, u2, END = runner.run() if", "gamma self.nsteps = nsteps self.states = model.initial_state self.dones = [False for _ in", "= np.zeros((nenv, self.n_in*nstack)) obs = env.reset() self.update_obs(obs) self.gamma = gamma self.nsteps = nsteps", "episode_over]): actions, u1, u2, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values)", "done in enumerate(dones): if done: self.obs[n] = self.obs[n] * 0 if episode_over[n] ==", "for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones", "env.num_envs self.nenv = nenv self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack) self.obs = np.zeros((nenv, self.n_in*nstack)) obs", "mb_values, mb_dones = [],[],[],[],[] mb_states = self.states for n in range(self.nsteps): actions, values,", "baselines.a2c.utils import Scheduler, make_path, 
find_trainable_variables from baselines.a2c.policies import CnnPolicy from baselines.a2c.utils import cat_entropy,", "np.asarray(_actions) mb_values = np.asarray(_values) mb_masks = np.asarray(_masks) mb_u1 = np.asarray(_u1) mb_u2 = np.asarray(_u2)", "# get length of this episode episode_length = episode_over[n]+1 # crop out only", "with tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) self._step = 0", "= model.train(obs, states, rewards, masks, u1, u2, values) nseconds = time.time() - tstart", "mb_masks = np.asarray(_masks) mb_u1 = np.asarray(_u1) mb_u2 = np.asarray(_u2) self._num_rollouts += 1 self._num_steps", "self.step = step_model.step self.value = step_model.value self.initial_state = step_model.initial_state self.save = save self.load", "mb_actions, mb_values, mb_dones, mb_u1, mb_u2 = [], [], [], [], [], [], []", "list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v] = g grads = [grad_dict[v] for v in params]", "batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0) mb_rewards =", "mb_rewards.flatten() mb_actions = mb_actions.flatten() mb_values = mb_values.flatten() mb_masks = mb_masks.flatten() return mb_obs, mb_states,", "nsteps, nstack, reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\")", "mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) obs, rewards, dones, _ = self.env.step(actions) self.states = states", "np.asarray(mb_obs).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0) mb_values", "= [False for _ in range(nenv)] def update_obs(self, obs): # Do frame-stacking here", "states 
= self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) obs, rewards, dones, _", "ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps,", "class RolloutRunner(Runner): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): super().__init__(env, model, nsteps=nsteps, nstack=nstack,", "mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2) obs, rewards, dones, _ = self.env.step(actions) self.states = states", "off value fn for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards", "= 0 self._num_steps = 0 self.rewards = [] def run(self): # reset env", "self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) obs, rewards, dones, _ = self.env.step(actions) self.states", "mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:,", "= np.asarray(_obs) mb_rewards = np.asarray(_rewards) mb_actions = np.asarray(_actions) mb_values = np.asarray(_values) mb_masks =", "em_log_var = tf.log(em_var + 1e-20) mlgv = tf.reduce_mean(em_log_var) for g, v in grads:", "= env self.model = model self.n_in, = env.observation_space.shape nenv = env.num_envs self.nenv =", "range(len(obs)): cur_lr = lr.value() td_map = { train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr", "= self.obs[n] * 0 if episode_over[n] == -1: episode_over[n] = step self.update_obs(obs) mb_rewards.append(rewards)", "tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef", "= Model(policy=policy, 
ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha,", "threads finish episode_over = [-1 for i in range(self.nenv)] mb_obs, mb_rewards, mb_actions, mb_values,", "baselines.a2c.utils import cat_entropy, mse import random def gs(x): return x.get_shape().as_list() class Model(object): def", "_rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards = self.rewards[-100:] # make numpy mb_obs", "-e_grad * ent_coef + p_grad policy_grads.append(grad) grad_dict = {} for g, v in", "rewards, masks, u1, u2, values, summary=False) nseconds = time.time() - tstart fps =", "random def gs(x): return x.get_shape().as_list() class Model(object): def __init__(self, policy, ob_space, ac_space, nenvs,", "= tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits * vf_coef cv_grads = tf.gradients(vf_loss, vf_params) policy_grads =", "neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\",", "mb_values, mb_dones, mb_masks, mb_u1, mb_u2)): # pull out data rewards = rewards.tolist() self.rewards.append(sum(rewards))", "tf.concat([tf.reshape(p, [-1]) for p in pg_grads], 0) cv_grad_splits = tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits", "= -e_grad * ent_coef + p_grad policy_grads.append(grad) grad_dict = {} for g, v", "num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner = RolloutRunner(env,", "= env.reset() self.update_obs(obs) self.gamma = gamma self.nsteps = nsteps self.states = model.initial_state 
self.dones", "self.obs = np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:, -self.n_in:] = obs[:, :self.n_in] def run(self): mb_obs,", "rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions,", "= tf.placeholder(tf.float32, []) step_model = policy(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False) train_model", "dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones = dones.tolist() if", "find_trainable_variables(\"model\") policy_params = [v for v in params if \"pi\" in v.name] vf_params", "True: #update % log_interval == 0 or update == 1: obs, states, rewards,", "numpy as np import tensorflow as tf from baselines import logger from baselines.common", "total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner = RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch = nenvs*nsteps", "lv = model.train(obs, states, rewards, masks, u1, u2, values, summary=False) nseconds = time.time()", "self._num_rollouts += 1 self._num_steps += len(rewards) * 4 # FRAME STACK ave_r =", "u1 = u1[:episode_length] u2 = u2[:episode_length] assert dones[-1] == True masks = masks[:episode_length]", "if \"vf\" in v.name] entropy_grads = tf.gradients(entropy, policy_params) ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t)", "- tstart fps = int((update * nbatch) / nseconds) env.close() if __name__ ==", "= tf.train.ExponentialMovingAverage(.99) all_policy_grads = tf.concat([tf.reshape(g, [-1]) for g in pg_grads], 0) all_policy_grads_sq =", "logdir=None, bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed) lr = args.lr vf_coef = args.vf_coef nenvs =", "td_map[train_model.M] = masks if summary: sum_str, policy_loss, value_loss, policy_entropy, lv, _ = sess.run(", "mb_rewards[n] = rewards mb_rewards = mb_rewards.flatten() mb_actions = 
mb_actions.flatten() mb_values = mb_values.flatten() mb_masks", "mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2) obs, rewards, dones, _ = self.env.step(actions) self.states", "0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1,", "mb_actions, mb_values, mb_dones, mb_masks, mb_u1, mb_u2)): # pull out data rewards = rewards.tolist()", "u2 = u2[:episode_length] assert dones[-1] == True masks = masks[:episode_length] # discount the", "in episode_over]): actions, u1, u2, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions)", "= rewards.tolist() dones = dones.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards+[value], dones+[0],", "{} for g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v] = g grads =", "tf.train.ExponentialMovingAverage(.99) all_policy_grads = tf.concat([tf.reshape(g, [-1]) for g in pg_grads], 0) all_policy_grads_sq = tf.square(all_policy_grads)", "u1[:episode_length] u2 = u2[:episode_length] assert dones[-1] == True masks = masks[:episode_length] # discount", "sm) + (1. 
- oh_A) * (-sm) pi_grads = -((tf.expand_dims(R, 1) - train_model.vf_t)", "= em_mean_sq - tf.square(em_mean) em_log_var = tf.log(em_var + 1e-20) mlgv = tf.reduce_mean(em_log_var) for", "tstart fps = int((update * nbatch) / nseconds) ev = explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\",", "in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones = dones.tolist() if dones[-1] ==", "mb_u2, END def learn(policy, env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5,", "tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits * vf_coef cv_grads = tf.gradients(vf_loss, vf_params) policy_grads = []", "break policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values)", "value fn for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards =", "v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v] = g grads = [grad_dict[v] for v", "logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards) * 4) #logger.dump_tabular() END = False #print(self._num_steps, len(rewards)) #if", "np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] last_values =", "for n, done in enumerate(dones): if done: self.obs[n] = self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones)", "train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr } if states != []: td_map[train_model.S] = states", "= find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R) if train_model.relaxed: pg_loss = tf.constant(0.0) oh_A =", "loaded_p in zip(params, loaded_params): restores.append(p.assign(loaded_p)) ps = sess.run(restores) self.train = train self.train_model =", 
"self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2) obs, rewards, dones, _", "tf.global_variables_initializer().run(session=sess) class Runner(object): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): self.env = env", "masks.tolist() u1, u2 = u1.tolist(), u2.tolist() # get length of this episode episode_length", "STACK ave_r = np.mean(self.rewards) #print(\"Episode {}, Ave R {}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\",", "= np.zeros(self.obs.shape) obs = self.env.reset() self.update_obs(obs) # run env until all threads finish", "= oh_A * (1. - sm) + (1. - oh_A) * (-sm) pi_grads", "mb_values.append(values) mb_dones.append(self.dones) obs, rewards, dones, _ = self.env.step(actions) self.states = states self.dones =", "END = runner.run() if END: break policy_loss, value_loss, policy_entropy, lv = model.train(obs, states,", "mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[] mb_states = self.states for n in", "discount_with_dones(rewards, dones, self.gamma) mb_rewards[n] = rewards mb_rewards = mb_rewards.flatten() mb_actions = mb_actions.flatten() mb_values", "+= 1 self._num_steps += len(rewards) * 4 # FRAME STACK ave_r = np.mean(self.rewards)", "vf_coef grads = tf.gradients(loss, params) grads = list(zip(grads, params)) ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads", "= np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] mb_u1", "= train self.train_model = train_model self.step_model = step_model self.step = step_model.step self.value =", "= self.rewards[-100:] # make numpy mb_obs = np.asarray(_obs) mb_rewards = np.asarray(_rewards) mb_actions =", "0) mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) # discount/bootstrap off value fn _obs, 
_rewards,", "nenv = env.num_envs self.nenv = nenv self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack) self.obs = np.zeros((nenv,", "values = values.tolist() dones = dones.tolist() masks = masks.tolist() u1, u2 = u1.tolist(),", "grads = tf.gradients(loss, params) grads = list(zip(grads, params)) ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads =", "tf from baselines import logger from baselines.common import set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env import", "args.lr vf_coef = args.vf_coef nenvs = env.num_envs ob_space = env.observation_space ac_space = env.action_space", "in v.name] vf_params = [v for v in params if \"vf\" in v.name]", "from baselines import logger from baselines.common import set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv", "= actions[:episode_length] values = values[:episode_length] dones = dones[:episode_length] u1 = u1[:episode_length] u2 =", "mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] last_values = self.model.value(self.obs, self.states, self.dones).tolist()", "dlogp_dpi = oh_A * (1. - sm) + (1. 
- oh_A) * (-sm)", "enumerate(dones): if done: self.obs[n] = self.obs[n] * 0 if episode_over[n] == -1: episode_over[n]", "len(rewards) * 4 # FRAME STACK ave_r = np.mean(self.rewards) #print(\"Episode {}, Ave R", "policy_grads = [] for e_grad, p_grad, param in zip(entropy_grads, pg_grads, policy_params): grad =", "= ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq) em_var = em_mean_sq - tf.square(em_mean) em_log_var = tf.log(em_var", "mb_dones = mb_dones[:, 1:] last_values = self.model.value(self.obs, self.states, self.dones).tolist() #discount/bootstrap off value fn", "max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, logdir=None, bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed) lr =", "nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner =", "mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] last_values = self.model.value(self.obs, self.states, self.dones).tolist() #discount/bootstrap off", "class Model(object): def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, nstack, num_procs, ent_coef=0.01, vf_coef=0.5,", "= masks if summary: sum_str, policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [self.sum_op,", "0 self.rewards = [] def run(self): # reset env self.obs = np.zeros(self.obs.shape) obs", "= env.observation_space ac_space = env.action_space num_procs = len(env.remotes) # HACK model = Model(policy=policy,", "nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch = nenvs*nsteps tstart = time.time() for update in range(1,", "range(1, total_timesteps//nbatch+1): if True: #update % log_interval == 0 or update == 1:", "p in pg_grads], 0) cv_grad_splits = tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits * vf_coef cv_grads", "mb_dones[:, 1:] last_values = self.model.value(self.obs, 
self.states, self.dones).tolist() #discount/bootstrap off value fn for n,", "value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values, summary=False) nseconds", "nstack=nstack, gamma=gamma) self._num_rollouts = 0 self._num_steps = 0 self.rewards = [] def run(self):", "Do frame-stacking here instead of the FrameStack wrapper to reduce # IPC overhead", "run(self): # reset env self.obs = np.zeros(self.obs.shape) obs = self.env.reset() self.update_obs(obs) # run", "for update in range(1, total_timesteps//nbatch+1): if True: #update % log_interval == 0 or", "only played experience obs = obs[:episode_length] rewards = rewards[:episode_length] actions = actions[:episode_length] values", "0 for e in episode_over]): actions, u1, u2, values, states = self.model.step(self.obs, self.states,", "nsteps=5, nstack=4, gamma=0.99): self.env = env self.model = model self.n_in, = env.observation_space.shape nenv", "import joblib import logging import numpy as np import tensorflow as tf from", "HACK model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm,", "= np.asarray(mb_obs).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)", "mb_obs = np.asarray(_obs) mb_rewards = np.asarray(_rewards) mb_actions = np.asarray(_actions) mb_values = np.asarray(_values) mb_masks", "= actions.tolist() values = values.tolist() dones = dones.tolist() masks = masks.tolist() u1, u2", "last_values = self.model.value(self.obs, self.states, self.dones).tolist() #discount/bootstrap off value fn for n, (rewards, dones,", "= mb_masks.flatten() return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values class RolloutRunner(Runner): def __init__(self,", "params] print(grads) else: pg_loss = tf.reduce_sum((tf.stop_gradient(R) - 
tf.stop_gradient(train_model.vf)) * neglogpac) policy_params = [v", "self.rewards[-100:] # make numpy mb_obs = np.asarray(_obs) mb_rewards = np.asarray(_rewards) mb_actions = np.asarray(_actions)", "= -((tf.expand_dims(R, 1) - train_model.vf_t) * dlogp_dpi) pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads", "= dones[:episode_length] u1 = u1[:episode_length] u2 = u2[:episode_length] assert dones[-1] == True masks", "rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular() else: obs, states,", "1000: logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END = True return mb_obs, mb_states, mb_rewards,", "values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) obs, rewards, dones,", "in zip(entropy_grads, pg_grads, policy_params): grad = -e_grad * ent_coef + p_grad policy_grads.append(grad) grad_dict", "rewards - values for step in range(len(obs)): cur_lr = lr.value() td_map = {", "lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None): config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth =", "mse import random def gs(x): return x.get_shape().as_list() class Model(object): def __init__(self, policy, ob_space,", "if True: #update % log_interval == 0 or update == 1: obs, states,", "= tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss = pg_loss - entropy * ent_coef + vf_loss *", "import discount_with_dones, jacobian from baselines.a2c.utils import Scheduler, make_path, 
find_trainable_variables from baselines.a2c.policies import CnnPolicy", ":self.n_in] def run(self): mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[] mb_states = self.states", "fn for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist()", "= self.states step = 0 while not all([e >= 0 for e in", "gamma=gamma) self._num_rollouts = 0 self._num_steps = 0 self.rewards = [] def run(self): #", "len(rewards)) #if self._num_steps > 5000000: if np.mean(self.rewards) >= 195.:#195.: #if self._num_rollouts > 1000:", "if done: self.obs[n] = self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of steps to batch", "mb_actions, mb_values, mb_u1, mb_u2, END def learn(policy, env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01,", "[pg - dg for pg, dg in zip(pg_grads, ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi, policy_params,", "* dlogp_dpi) pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads = [pg - dg for", "step_model.value self.initial_state = step_model.initial_state self.save = save self.load = load tf.global_variables_initializer().run(session=sess) class Runner(object):", "nstack=4, gamma=0.99): self.env = env self.model = model self.n_in, = env.observation_space.shape nenv =", "train_model self.step_model = step_model self.step = step_model.step self.value = step_model.value self.initial_state = step_model.initial_state", "mb_dones.append(self.dones) #batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards", "= tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train =", "n, done in enumerate(dones): if done: self.obs[n] = self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) 
mb_dones.append(self.dones) #batch", "= time.time() - tstart fps = int((update * nbatch) / nseconds) env.close() if", "mb_dones, mb_u1, mb_u2 = [], [], [], [], [], [], [] mb_states =", "v.name] entropy_grads = tf.gradients(entropy, policy_params) ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss,", "rewards[:episode_length] actions = actions[:episode_length] values = values[:episode_length] dones = dones[:episode_length] u1 = u1[:episode_length]", "log_interval=100, logdir=None, bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed) lr = args.lr vf_coef = args.vf_coef nenvs", "self.writer.add_summary(sum_str, self._step) else: policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [pg_loss, vf_loss, entropy,", "mb_values.flatten() mb_masks = mb_masks.flatten() return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values class RolloutRunner(Runner):", "lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner = RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma)", "mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_u1, mb_u2, END def learn(policy, env, seed, nsteps=5,", "entropy * ent_coef + vf_loss * vf_coef grads = tf.gradients(loss, params) grads =", "policy_params = [v for v in params if \"pi\" in v.name] pg_grads =", "self._num_rollouts) logger.record_tabular(\"l\", len(rewards) * 4) #logger.dump_tabular() END = False #print(self._num_steps, len(rewards)) #if self._num_steps", "0 while not all([e >= 0 for e in episode_over]): actions, u1, u2,", "if states != []: td_map[train_model.S] = states td_map[train_model.M] = masks if summary: sum_str,", "dones = dones.tolist() masks = masks.tolist() u1, u2 = u1.tolist(), u2.tolist() # get", "lv = model.train(obs, states, rewards, masks, u1, u2, values) nseconds = time.time() -", "mb_states = self.states for n in range(self.nsteps): actions, values, states 
= self.model.step(self.obs, self.states,", "= trainer.apply_gradients(grads) lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) self._step = 0 def train(obs, states,", "[],[],[],[],[] mb_states = self.states for n in range(self.nsteps): actions, values, states = self.model.step(self.obs,", "Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule) self._step = 0 def train(obs, states, rewards, masks, u1, u2,", "tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A * (1. - sm) + (1. - oh_A) *", "zip(params, loaded_params): restores.append(p.assign(loaded_p)) ps = sess.run(restores) self.train = train self.train_model = train_model self.step_model", "num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None): config = tf.ConfigProto(allow_soft_placement=True,", "explained_variance from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.atari_wrappers import wrap_deepmind from baselines.a2c.utils import discount_with_dones,", "= tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) * neglogpac) policy_params = [v for v in params", "0) all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean = ema.average(all_policy_grads) em_mean_sq =", "def learn(policy, env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99,", "self._num_steps += len(rewards) * 4 # FRAME STACK ave_r = np.mean(self.rewards) #print(\"Episode {},", "[] for n, (obs, rewards, actions, values, dones, masks, u1, u2) in enumerate(zip(mb_obs,", "ADV:advs, R:rewards, LR:cur_lr } if states != []: td_map[train_model.S] = states td_map[train_model.M] =", "self.initial_state = step_model.initial_state self.save = save self.load = load tf.global_variables_initializer().run(session=sess) class 
Runner(object): def", "policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values, summary=False)", "mb_u2)): # pull out data rewards = rewards.tolist() self.rewards.append(sum(rewards)) actions = actions.tolist() values", "= np.asarray(_u2) self._num_rollouts += 1 self._num_steps += len(rewards) * 4 # FRAME STACK", "jacobian from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables from baselines.a2c.policies import CnnPolicy from baselines.a2c.utils", "# discount the rewards rewards = discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values)", "out data rewards = rewards.tolist() self.rewards.append(sum(rewards)) actions = actions.tolist() values = values.tolist() dones", "[], [], [], [], [], [] mb_states = self.states step = 0 while", "of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards,", "neglogpac) policy_params = [v for v in params if \"pi\" in v.name] pg_grads", "off value fn _obs, _rewards, _actions, _values, _masks, _u1, _u2 = [], [],", "tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R) if", "1 return policy_loss, value_loss, policy_entropy, lv def save(save_path): ps = sess.run(params) make_path(save_path) joblib.dump(ps,", "= self.env.reset() self.update_obs(obs) # run env until all threads finish episode_over = [-1", "baselines.common.atari_wrappers import wrap_deepmind from baselines.a2c.utils import discount_with_dones, jacobian from baselines.a2c.utils import Scheduler, make_path,", "\"pi\" in v.name] vf_params = [v for v in params if \"vf\" in", "= tf.constant(0.0) oh_A = tf.one_hot(train_model.a0, ac_space.n) params = 
find_trainable_variables(\"model\") policy_params = [v for", "logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular() else: obs, states, rewards,", "(obs, rewards, actions, values, dones, masks, u1, u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values,", "epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, logdir=None, bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed) lr = args.lr vf_coef", "u2, END = runner.run() if END: break policy_loss, value_loss, policy_entropy, lv = model.train(obs,", "# IPC overhead self.obs = np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:, -self.n_in:] = obs[:, :self.n_in]", "[None]) R = tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32, []) step_model = policy(sess, ob_space,", "range(nenv)] def update_obs(self, obs): # Do frame-stacking here instead of the FrameStack wrapper", "self._num_rollouts > 1000: logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END = True return mb_obs,", "dones.tolist() masks = masks.tolist() u1, u2 = u1.tolist(), u2.tolist() # get length of", "model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr,", "1:] mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) # discount/bootstrap", "v in params if \"pi\" in v.name] vf_params = [v for v in", "- train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss, policy_params) sm = tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A *", "[], [], [], [], [] for n, (obs, rewards, actions, values, dones, masks,", "= 
discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1) _u2.extend(u2) self.rewards =", "env self.model = model self.n_in, = env.observation_space.shape nenv = env.num_envs self.nenv = nenv", "mb_values, mb_dones, mb_u1, mb_u2 = [], [], [], [], [], [], [] mb_states", "= values[:episode_length] dones = dones[:episode_length] u1 = u1[:episode_length] u2 = u2[:episode_length] assert dones[-1]", "from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables from baselines.a2c.policies import CnnPolicy from baselines.a2c.utils import", "nsteps=5, nstack=1, total_timesteps=int(80e6), ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, logdir=None, bootstrap=False, args=None):", "np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] mb_u1 =", "g) self.sum_op = tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]):", "in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_masks, mb_u1, mb_u2)): # pull out data", "tf.Session(config=config) nact = ac_space.n nbatch = nenvs*nsteps ADV = tf.placeholder(tf.float32, [None]) R =", "np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0) # discount/bootstrap off value fn", "nbatch = nenvs*nsteps tstart = time.time() for update in range(1, total_timesteps//nbatch+1): if True:", "wrapper to reduce # IPC overhead self.obs = np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:, -self.n_in:]", "tf.summary.histogram(\"R\", R) if train_model.relaxed: pg_loss = tf.constant(0.0) oh_A = tf.one_hot(train_model.a0, ac_space.n) params =", "tf.concat([tf.reshape(g, [-1]) for g in pg_grads], 0) 
all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads,", "for g in pg_grads], 0) all_policy_grads_sq = tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean", "for v in params if \"pi\" in v.name] vf_params = [v for v", "model, nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch = nenvs*nsteps tstart = time.time() for update in", "[], [], [], [], [] mb_states = self.states step = 0 while not", "self.step_model = step_model self.step = step_model.step self.value = step_model.value self.initial_state = step_model.initial_state self.save", "nseconds = time.time() - tstart fps = int((update * nbatch) / nseconds) ev", "states, rewards, masks, u1, u2, values, summary=False) nseconds = time.time() - tstart fps", "ac_space.n) params = find_trainable_variables(\"model\") policy_params = [v for v in params if \"pi\"", "R) if train_model.relaxed: pg_loss = tf.constant(0.0) oh_A = tf.one_hot(train_model.a0, ac_space.n) params = find_trainable_variables(\"model\")", "loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef grads =", "intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True sess = tf.Session(config=config) nact = ac_space.n nbatch =", "actions = actions.tolist() values = values.tolist() dones = dones.tolist() masks = masks.tolist() u1,", "policy_params) sm = tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A * (1. 
- sm) + (1.", "fps = int((update * nbatch) / nseconds) ev = explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy))", "# batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0) mb_rewards", "set_global_seeds(seed) lr = args.lr vf_coef = args.vf_coef nenvs = env.num_envs ob_space = env.observation_space", "joblib.dump(ps, save_path) def load(load_path): loaded_params = joblib.load(load_path) restores = [] for p, loaded_p", "ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards) * 4) #logger.dump_tabular() END = False", "= tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss, policy_params) sm = tf.nn.softmax(train_model.pi) dlogp_dpi =", "ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule,", "== True masks = masks[:episode_length] # discount the rewards rewards = discount_with_dones(rewards, dones,", "= tf.concat([tf.reshape(p, [-1]) for p in pg_grads], 0) cv_grad_splits = tf.reduce_sum(tf.square(cv_grads)) vf_loss =", "#if self._num_steps > 5000000: if np.mean(self.rewards) >= 195.:#195.: #if self._num_rollouts > 1000: logger.record_tabular(\"finished_in\",", "run(self): mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[] mb_states = self.states for n", "gs(x): return x.get_shape().as_list() class Model(object): def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, nstack,", "= [], [], [], [], [], [], [] mb_states = self.states step =", "params = find_trainable_variables(\"model\") policy_params = [v for v in params if \"pi\" in", "train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr } if states != []: 
td_map[train_model.S] =", "[], [] mb_states = self.states step = 0 while not all([e >= 0", "= int((update * nbatch) / nseconds) ev = explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\",", "tf.square(all_policy_grads) apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq]) em_mean = ema.average(all_policy_grads) em_mean_sq = ema.average(all_policy_grads_sq) em_var =", "train self.train_model = train_model self.step_model = step_model self.step = step_model.step self.value = step_model.value", "runner = RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch = nenvs*nsteps tstart = time.time()", "episode_over[n]+1 # crop out only played experience obs = obs[:episode_length] rewards = rewards[:episode_length]", "wrap_deepmind from baselines.a2c.utils import discount_with_dones, jacobian from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables from", "to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)", "[]) step_model = policy(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False) train_model = policy(sess,", "FrameStack wrapper to reduce # IPC overhead self.obs = np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:,", "+ 1e-20) mlgv = tf.reduce_mean(em_log_var) for g, v in grads: print(v.name, g) tf.summary.histogram(v.name,", "lrschedule=lrschedule, logdir=logdir) runner = RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch = nenvs*nsteps tstart", "vf_params)): grad_dict[v] = g grads = [grad_dict[v] for v in params] print(grads) else:", "self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack) self.obs = np.zeros((nenv, self.n_in*nstack)) obs = env.reset() self.update_obs(obs) self.gamma", "nbatch = nenvs*nsteps ADV = tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32, [None]) LR =", "* vf_coef grads = 
tf.gradients(loss, params) grads = list(zip(grads, params)) ema = tf.train.ExponentialMovingAverage(.99)", "data rewards = rewards.tolist() self.rewards.append(sum(rewards)) actions = actions.tolist() values = values.tolist() dones =", "if summary: sum_str, policy_loss, value_loss, policy_entropy, lv, _ = sess.run( [self.sum_op, pg_loss, vf_loss,", "axis=1) self.obs[:, -self.n_in:] = obs[:, :self.n_in] def run(self): mb_obs, mb_rewards, mb_actions, mb_values, mb_dones", "or update == 1: obs, states, rewards, masks, actions, values, u1, u2, END", "lv def save(save_path): ps = sess.run(params) make_path(save_path) joblib.dump(ps, save_path) def load(load_path): loaded_params =", "Model(object): def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, nstack, num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5,", "grad_ys=pi_grads) cv_grads = tf.concat([tf.reshape(p, [-1]) for p in pg_grads], 0) cv_grad_splits = tf.reduce_sum(tf.square(cv_grads))", "= tf.gradients(entropy, policy_params) ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss, policy_params) sm", "= train_model self.step_model = step_model self.step = step_model.step self.value = step_model.value self.initial_state =", "reuse=False) train_model = policy(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi,", "baselines.common import set_global_seeds, explained_variance from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv from baselines.common.atari_wrappers import wrap_deepmind from", "vf_loss = cv_grad_splits * vf_coef cv_grads = tf.gradients(vf_loss, vf_params) policy_grads = [] for", "reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\", train_model.vf)", 
"actions, values, dones, masks, u1, u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_masks,", "self.value = step_model.value self.initial_state = step_model.initial_state self.save = save self.load = load tf.global_variables_initializer().run(session=sess)", "tf.reset_default_graph() set_global_seeds(seed) lr = args.lr vf_coef = args.vf_coef nenvs = env.num_envs ob_space =", "= tf.gradients(loss, params) grads = list(zip(grads, params)) ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads = tf.concat([tf.reshape(g,", "in zip(params, loaded_params): restores.append(p.assign(loaded_p)) ps = sess.run(restores) self.train = train self.train_model = train_model", "restores.append(p.assign(loaded_p)) ps = sess.run(restores) self.train = train self.train_model = train_model self.step_model = step_model", "u2, values) nseconds = time.time() - tstart fps = int((update * nbatch) /", "= [] def run(self): # reset env self.obs = np.zeros(self.obs.shape) obs = self.env.reset()", "instead of the FrameStack wrapper to reduce # IPC overhead self.obs = np.roll(self.obs,", "u2[:episode_length] assert dones[-1] == True masks = masks[:episode_length] # discount the rewards rewards", "policy_entropy, lv, _ = sess.run( [self.sum_op, pg_loss, vf_loss, entropy, mlgv, _train], td_map )", "shift=-self.n_in, axis=1) self.obs[:, -self.n_in:] = obs[:, :self.n_in] def run(self): mb_obs, mb_rewards, mb_actions, mb_values,", "+ (1. - oh_A) * (-sm) pi_grads = -((tf.expand_dims(R, 1) - train_model.vf_t) *", "#discount/bootstrap off value fn for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):", "(1. 
- oh_A) * (-sm) pi_grads = -((tf.expand_dims(R, 1) - train_model.vf_t) * dlogp_dpi)", "[], [], [], [], [], [] for n, (obs, rewards, actions, values, dones,", "in params] print(grads) else: pg_loss = tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) * neglogpac) policy_params =", "self.n_in*nstack)) obs = env.reset() self.update_obs(obs) self.gamma = gamma self.nsteps = nsteps self.states =", "steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1,", "= self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards) mb_dones.append(self.dones) #batch of steps to batch of rollouts mb_obs", "i in range(self.nenv)] mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_u1, mb_u2 = [], [],", "[-1]) for p in pg_grads], 0) cv_grad_splits = tf.reduce_sum(tf.square(cv_grads)) vf_loss = cv_grad_splits *", "n, (obs, rewards, actions, values, dones, masks, u1, u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions,", "from baselines.a2c.utils import discount_with_dones, jacobian from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables from baselines.a2c.policies", "self.model = model self.n_in, = env.observation_space.shape nenv = env.num_envs self.nenv = nenv self.batch_ob_shape", "dones for n, done in enumerate(dones): if done: self.obs[n] = self.obs[n]*0 self.update_obs(obs) mb_rewards.append(rewards)", "for e in episode_over]): actions, u1, u2, values, states = self.model.step(self.obs, self.states, self.dones)", "tf.square(em_mean) em_log_var = tf.log(em_var + 1e-20) mlgv = tf.reduce_mean(em_log_var) for g, v in", "for step in range(len(obs)): cur_lr = lr.value() td_map = { train_model.X:obs, train_model.U1:u1, train_model.U2:u2,", "0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] last_values = self.model.value(self.obs, self.states,", "= model self.n_in, = env.observation_space.shape nenv = env.num_envs self.nenv = 
nenv self.batch_ob_shape =", "done: self.obs[n] = self.obs[n] * 0 if episode_over[n] == -1: episode_over[n] = step", "RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch = nenvs*nsteps tstart = time.time() for update", "np.zeros(self.obs.shape) obs = self.env.reset() self.update_obs(obs) # run env until all threads finish episode_over", "mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_u1, mb_u2 = [], [], [], [], [],", "return policy_loss, value_loss, policy_entropy, lv def save(save_path): ps = sess.run(params) make_path(save_path) joblib.dump(ps, save_path)", "grad_ys=pi_grads) pg_grads = [pg - dg for pg, dg in zip(pg_grads, ddiff_grads)] pi_param_grads", "tensorflow as tf from baselines import logger from baselines.common import set_global_seeds, explained_variance from", "epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner = RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma) nbatch =", "ev = explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss)) logger.record_tabular(\"explained_variance\", float(ev)) logger.record_tabular(\"log_variance\", lv) logger.dump_tabular()", "= tf.gradients(ddiff_loss, policy_params) sm = tf.nn.softmax(train_model.pi) dlogp_dpi = oh_A * (1. 
- sm)", "#logger.dump_tabular() END = False #print(self._num_steps, len(rewards)) #if self._num_steps > 5000000: if np.mean(self.rewards) >=", "self.states = model.initial_state self.dones = [False for _ in range(nenv)] def update_obs(self, obs):", "ac_space, nenvs, nsteps, nstack, num_procs, ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear',", "actions, u1, u2, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones)", "value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values) nseconds =", ":-1] mb_dones = mb_dones[:, 1:] last_values = self.model.value(self.obs, self.states, self.dones).tolist() #discount/bootstrap off value", "ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner = RolloutRunner(env, model,", "= [v for v in params if \"pi\" in v.name] vf_params = [v", "pull out data rewards = rewards.tolist() self.rewards.append(sum(rewards)) actions = actions.tolist() values = values.tolist()", "dtype=np.bool).swapaxes(1, 0) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] mb_u1 = np.asarray(mb_u1,", "self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps) logger.dump_tabular() END = True return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions,", "cv_grad_splits * vf_coef cv_grads = tf.gradients(vf_loss, vf_params) policy_grads = [] for e_grad, p_grad,", "import gym import time import joblib import logging import numpy as np import", "else: pg_loss = tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) * neglogpac) policy_params = [v for v", "return x.get_shape().as_list() class Model(object): def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, nstack, 
num_procs,", "ave_r)) logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards) * 4) #logger.dump_tabular() END", "= self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2) obs, rewards, dones,", "range(self.nsteps): actions, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) obs,", "mb_values = np.asarray(_values) mb_masks = np.asarray(_masks) mb_u1 = np.asarray(_u1) mb_u2 = np.asarray(_u2) self._num_rollouts", "batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1, 0) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0) mb_actions", "time import joblib import logging import numpy as np import tensorflow as tf", "[False for _ in range(nenv)] def update_obs(self, obs): # Do frame-stacking here instead", "= mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2", "1 mb_dones.append(self.dones) # batch of steps to batch of rollouts mb_obs = np.asarray(mb_obs).swapaxes(1,", "tf.constant(0.0) oh_A = tf.one_hot(train_model.a0, ac_space.n) params = find_trainable_variables(\"model\") policy_params = [v for v", "entropy_grads = tf.gradients(entropy, policy_params) ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss, policy_params)", "= [] for p, loaded_p in zip(params, loaded_params): restores.append(p.assign(loaded_p)) ps = sess.run(restores) self.train", "tf.summary.histogram(\"vf\", train_model.vf) tf.summary.histogram(\"R\", R) if train_model.relaxed: pg_loss = tf.constant(0.0) oh_A = tf.one_hot(train_model.a0, ac_space.n)", "= 
tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32, []) step_model =", "step += 1 mb_dones.append(self.dones) # batch of steps to batch of rollouts mb_obs", "ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4, alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None): config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs,", "if episode_over[n] == -1: episode_over[n] = step self.update_obs(obs) mb_rewards.append(rewards) step += 1 mb_dones.append(self.dones)", "dones+[0], self.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.gamma) mb_rewards[n] = rewards mb_rewards =", "mlgv = tf.reduce_mean(em_log_var) for g, v in grads: print(v.name, g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\",", "in params if \"pi\" in v.name] pg_grads = tf.gradients(pg_loss, policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf),", "if END: break policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1,", "mb_dones, last_values)): rewards = rewards.tolist() dones = dones.tolist() if dones[-1] == 0: rewards", "def run(self): # reset env self.obs = np.zeros(self.obs.shape) obs = self.env.reset() self.update_obs(obs) #", "= mb_actions.flatten() mb_values = mb_values.flatten() mb_masks = mb_masks.flatten() return mb_obs, mb_states, mb_rewards, mb_masks,", "= args.lr vf_coef = args.vf_coef nenvs = env.num_envs ob_space = env.observation_space ac_space =", "= self.states for n in range(self.nsteps): actions, values, states = self.model.step(self.obs, self.states, self.dones)", "values, u1, u2, END = runner.run() if END: break policy_loss, value_loss, policy_entropy, lv", "in range(self.nsteps): actions, values, states = self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones)", 
"= load tf.global_variables_initializer().run(session=sess) class Runner(object): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): self.env", "dones for n, done in enumerate(dones): if done: self.obs[n] = self.obs[n] * 0", "% log_interval == 0 or update == 1: obs, states, rewards, masks, actions,", "_ = sess.run( [pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self._step += 1", "= model.initial_state self.dones = [False for _ in range(nenv)] def update_obs(self, obs): #", "mb_values class RolloutRunner(Runner): def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99): super().__init__(env, model, nsteps=nsteps,", "np.asarray(_u2) self._num_rollouts += 1 self._num_steps += len(rewards) * 4 # FRAME STACK ave_r", "v in grads: print(v.name, g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op = tf.summary.merge_all() self.writer", "if \"pi\" in v.name] pg_grads = tf.gradients(pg_loss, policy_params) vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R)) loss", "= tf.log(em_var + 1e-20) mlgv = tf.reduce_mean(em_log_var) for g, v in grads: print(v.name,", "mb_values = mb_values.flatten() mb_masks = mb_masks.flatten() return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values", "g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v] = g grads = [grad_dict[v] for", "rewards, actions, values, dones, masks, u1, u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones,", "_u1.extend(u1) _u2.extend(u2) self.rewards = self.rewards[-100:] # make numpy mb_obs = np.asarray(_obs) mb_rewards =", "[v for v in params if \"pi\" in v.name] pg_grads = tf.gradients(pg_loss, policy_params)", "nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef, max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir) runner", "= sess.run( [pg_loss, vf_loss, entropy, 
mlgv, _train], td_map ) self._step += 1 return", "baselines.a2c.utils import discount_with_dones, jacobian from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables from baselines.a2c.policies import", "masks, u1, u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_masks, mb_u1, mb_u2)): #", "u1, u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_masks, mb_u1, mb_u2)): # pull", "tf.placeholder(tf.float32, []) step_model = policy(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False) train_model =", "get length of this episode episode_length = episode_over[n]+1 # crop out only played", "for g, v in grads: print(v.name, g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op =", "self.obs[n] * 0 if episode_over[n] == -1: episode_over[n] = step self.update_obs(obs) mb_rewards.append(rewards) step", "# crop out only played experience obs = obs[:episode_length] rewards = rewards[:episode_length] actions", "[] for e_grad, p_grad, param in zip(entropy_grads, pg_grads, policy_params): grad = -e_grad *", "1, nstack, reuse=False) train_model = policy(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True) neglogpac", "nenvs = env.num_envs ob_space = env.observation_space ac_space = env.action_space num_procs = len(env.remotes) #", "the rewards rewards = discount_with_dones(rewards, dones, self.gamma) _obs.extend(obs) _rewards.extend(rewards) _actions.extend(actions) _values.extend(values) _masks.extend(masks) _u1.extend(u1)", "pg_loss - entropy * ent_coef + vf_loss * vf_coef grads = tf.gradients(loss, params)", "tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=num_procs, inter_op_parallelism_threads=num_procs) config.gpu_options.allow_growth = True sess = tf.Session(config=config) nact = ac_space.n nbatch", "policy_params) ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss, policy_params) sm = 
tf.nn.softmax(train_model.pi)", "finish episode_over = [-1 for i in range(self.nenv)] mb_obs, mb_rewards, mb_actions, mb_values, mb_dones,", "self.update_obs(obs) mb_rewards.append(rewards) step += 1 mb_dones.append(self.dones) # batch of steps to batch of", "self._step = 0 def train(obs, states, rewards, masks, u1, u2, values, summary=False): advs", "loaded_params): restores.append(p.assign(loaded_p)) ps = sess.run(restores) self.train = train self.train_model = train_model self.step_model =", "= pg_loss - entropy * ent_coef + vf_loss * vf_coef grads = tf.gradients(loss,", "dones = dones[:episode_length] u1 = u1[:episode_length] u2 = u2[:episode_length] assert dones[-1] == True", "self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) mb_u1.append(u1) mb_u2.append(u2) obs, rewards, dones, _ = self.env.step(actions)", "n, done in enumerate(dones): if done: self.obs[n] = self.obs[n] * 0 if episode_over[n]", "vf_loss * vf_coef grads = tf.gradients(loss, params) grads = list(zip(grads, params)) ema =", "dlogp_dpi) pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads = [pg - dg for pg,", "dg for pg, dg in zip(pg_grads, ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) cv_grads", "[], [], [], [] mb_states = self.states step = 0 while not all([e", "obs): # Do frame-stacking here instead of the FrameStack wrapper to reduce #", "nenvs*nsteps ADV = tf.placeholder(tf.float32, [None]) R = tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32, [])", "ent_coef=0.01, max_grad_norm=0.5, lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99, log_interval=100, logdir=None, bootstrap=False, args=None): tf.reset_default_graph() set_global_seeds(seed) lr", "[-1 for i in range(self.nenv)] mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_u1, mb_u2 =", "actions, values, u1, u2, END = runner.run() if END: break policy_loss, 
value_loss, policy_entropy,", "* (-sm) pi_grads = -((tf.expand_dims(R, 1) - train_model.vf_t) * dlogp_dpi) pg_grads = tf.gradients(train_model.pi,", "int((update * nbatch) / nseconds) ev = explained_variance(values, rewards) logger.record_tabular(\"policy_entropy\", float(policy_entropy)) logger.record_tabular(\"value_loss\", float(value_loss))", "model, nsteps=5, nstack=4, gamma=0.99): self.env = env self.model = model self.n_in, = env.observation_space.shape", "= states td_map[train_model.M] = masks if summary: sum_str, policy_loss, value_loss, policy_entropy, lv, _", "5000000: if np.mean(self.rewards) >= 195.:#195.: #if self._num_rollouts > 1000: logger.record_tabular(\"finished_in\", self._num_rollouts) logger.record_tabular(\"total_steps\", self._num_steps)", "R = tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32, []) step_model = policy(sess, ob_space, ac_space,", "cv_grads = tf.gradients(vf_loss, vf_params) policy_grads = [] for e_grad, p_grad, param in zip(entropy_grads,", "= tf.reduce_mean(em_log_var) for g, v in grads: print(v.name, g) tf.summary.histogram(v.name, v) tf.summary.histogram(v.name+\"_grad\", g)", "_ in range(nenv)] def update_obs(self, obs): # Do frame-stacking here instead of the", "END = True return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_u1, mb_u2, END", "self._num_steps > 5000000: if np.mean(self.rewards) >= 195.:#195.: #if self._num_rollouts > 1000: logger.record_tabular(\"finished_in\", self._num_rollouts)", "= sess.run(params) make_path(save_path) joblib.dump(ps, save_path) def load(load_path): loaded_params = joblib.load(load_path) restores = []", "nstack=nstack, gamma=gamma) nbatch = nenvs*nsteps tstart = time.time() for update in range(1, total_timesteps//nbatch+1):", "mb_rewards, mb_masks, mb_actions, mb_values, mb_u1, mb_u2, END def learn(policy, env, seed, nsteps=5, nstack=1,", "oh_A) * (-sm) pi_grads = -((tf.expand_dims(R, 1) - train_model.vf_t) * dlogp_dpi) pg_grads =", "- 
train_model.vf_t) * dlogp_dpi) pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads) pg_grads = [pg -", "obs, rewards, dones, _ = self.env.step(actions) self.states = states self.dones = dones for", "in range(self.nenv)] mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_u1, mb_u2 = [], [], [],", "mb_dones = mb_dones[:, 1:] mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0) mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1,", "* 0 if episode_over[n] == -1: episode_over[n] = step self.update_obs(obs) mb_rewards.append(rewards) step +=", "in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)): grad_dict[v] = g grads = [grad_dict[v] for v in", "find_trainable_variables from baselines.a2c.policies import CnnPolicy from baselines.a2c.utils import cat_entropy, mse import random def", "self.rewards.append(sum(rewards)) actions = actions.tolist() values = values.tolist() dones = dones.tolist() masks = masks.tolist()", "[self.sum_op, pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self.writer.add_summary(sum_str, self._step) else: policy_loss, value_loss,", "here instead of the FrameStack wrapper to reduce # IPC overhead self.obs =", "== -1: episode_over[n] = step self.update_obs(obs) mb_rewards.append(rewards) step += 1 mb_dones.append(self.dones) # batch", "= { train_model.X:obs, train_model.U1:u1, train_model.U2:u2, ADV:advs, R:rewards, LR:cur_lr } if states != []:", "nstack, reuse=True) neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0) entropy = tf.reduce_sum(cat_entropy(train_model.pi)) params = find_trainable_variables(\"model\") tf.summary.histogram(\"vf\",", "obs, states, rewards, masks, actions, values, u1, u2, END = runner.run() if END:", "self.env = env self.model = model self.n_in, = env.observation_space.shape nenv = env.num_envs self.nenv", "rewards, masks, actions, values, u1, u2, END = runner.run() if END: break policy_loss,", "time.time() - 
tstart fps = int((update * nbatch) / nseconds) ev = explained_variance(values,", "self.model.step(self.obs, self.states, self.dones) mb_obs.append(np.copy(self.obs)) mb_actions.append(actions) mb_values.append(values) mb_dones.append(self.dones) obs, rewards, dones, _ = self.env.step(actions)", "_rewards, _actions, _values, _masks, _u1, _u2 = [], [], [], [], [], [],", "self.train_model = train_model self.step_model = step_model self.step = step_model.step self.value = step_model.value self.initial_state", "[grad_dict[v] for v in params] print(grads) else: pg_loss = tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) *", "as np import tensorflow as tf from baselines import logger from baselines.common import", "dones[-1] == 0: rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones,", "_actions, _values, _masks, _u1, _u2 = [], [], [], [], [], [], []", "mlgv, _train], td_map ) self.writer.add_summary(sum_str, self._step) else: policy_loss, value_loss, policy_entropy, lv, _ =", "tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999) with tf.control_dependencies([apply_mean_op]): _train = trainer.apply_gradients(grads)", "_ = self.env.step(actions) self.states = states self.dones = dones for n, done in", "tf.gradients(loss, params) grads = list(zip(grads, params)) ema = tf.train.ExponentialMovingAverage(.99) all_policy_grads = tf.concat([tf.reshape(g, [-1])", "logging import numpy as np import tensorflow as tf from baselines import logger", "numpy mb_obs = np.asarray(_obs) mb_rewards = np.asarray(_rewards) mb_actions = np.asarray(_actions) mb_values = np.asarray(_values)", "= tf.placeholder(tf.float32, [None]) LR = tf.placeholder(tf.float32, []) step_model = policy(sess, ob_space, ac_space, nenvs,", "np.mean(self.rewards) #print(\"Episode {}, Ave R {}\".format(self._num_rollouts, ave_r)) 
logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts)", "= tf.Session(config=config) nact = ac_space.n nbatch = nenvs*nsteps ADV = tf.placeholder(tf.float32, [None]) R", "dones[-1] == True masks = masks[:episode_length] # discount the rewards rewards = discount_with_dones(rewards,", "{}\".format(self._num_rollouts, ave_r)) logger.record_tabular(\"ave_r\", ave_r) logger.record_tabular(\"last_r\", self.rewards[-1]) logger.record_tabular(\"num_rollouts\", self._num_rollouts) logger.record_tabular(\"l\", len(rewards) * 4) #logger.dump_tabular()", "ac_space = env.action_space num_procs = len(env.remotes) # HACK model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space,", "args=None): tf.reset_default_graph() set_global_seeds(seed) lr = args.lr vf_coef = args.vf_coef nenvs = env.num_envs ob_space", "episode_length = episode_over[n]+1 # crop out only played experience obs = obs[:episode_length] rewards", "mb_rewards, mb_actions, mb_values, mb_dones, mb_masks, mb_u1, mb_u2)): # pull out data rewards =", "= policy(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False) train_model = policy(sess, ob_space, ac_space,", "if train_model.relaxed: pg_loss = tf.constant(0.0) oh_A = tf.one_hot(train_model.a0, ac_space.n) params = find_trainable_variables(\"model\") policy_params", "- tf.square(em_mean) em_log_var = tf.log(em_var + 1e-20) mlgv = tf.reduce_mean(em_log_var) for g, v", "v) tf.summary.histogram(v.name+\"_grad\", g) self.sum_op = tf.summary.merge_all() self.writer = tf.summary.FileWriter(logdir) trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999)", "- dg for pg, dg in zip(pg_grads, ddiff_grads)] pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads)", "0 or update == 1: obs, states, rewards, masks, actions, values, u1, u2,", "= env.action_space num_procs = len(env.remotes) # HACK model = Model(policy=policy, ob_space=ob_space, 
ac_space=ac_space, nenvs=nenvs,", "rewards = rewards.tolist() self.rewards.append(sum(rewards)) actions = actions.tolist() values = values.tolist() dones = dones.tolist()", "= sess.run( [self.sum_op, pg_loss, vf_loss, entropy, mlgv, _train], td_map ) self.writer.add_summary(sum_str, self._step) else:", "value_loss, policy_entropy, lv def save(save_path): ps = sess.run(params) make_path(save_path) joblib.dump(ps, save_path) def load(load_path):", "self._num_rollouts = 0 self._num_steps = 0 self.rewards = [] def run(self): # reset", "ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t) ddiff_grads = tf.gradients(ddiff_loss, policy_params) sm = tf.nn.softmax(train_model.pi) dlogp_dpi", "mb_states, mb_rewards, mb_masks, mb_actions, mb_values class RolloutRunner(Runner): def __init__(self, env, model, nsteps=5, nstack=4,", "save_path) def load(load_path): loaded_params = joblib.load(load_path) restores = [] for p, loaded_p in", "= rewards - values for step in range(len(obs)): cur_lr = lr.value() td_map =", "overhead self.obs = np.roll(self.obs, shift=-self.n_in, axis=1) self.obs[:, -self.n_in:] = obs[:, :self.n_in] def run(self):", "for v in params] print(grads) else: pg_loss = tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) * neglogpac)" ]
[ "<reponame>technolingo/AlgoStructuresPy<filename>reversestring/test_.py from .index import reverse_string def test_reverse_string(): assert reverse_string('heLLo woRld') == 'dlRow oLLeh'" ]
[ "setup, find_packages __version__ = '1.0.0' setup( name='ZwoReader', version=__version__, description='.', url='https://github.com/msimms/ZwoReader', packages=[], author='<NAME>', author_email='<EMAIL>',", "find_packages __version__ = '1.0.0' setup( name='ZwoReader', version=__version__, description='.', url='https://github.com/msimms/ZwoReader', packages=[], author='<NAME>', author_email='<EMAIL>', license='MIT',", "<reponame>msimms/ZwoReader from setuptools import setup, find_packages __version__ = '1.0.0' setup( name='ZwoReader', version=__version__, description='.',", "= '1.0.0' setup( name='ZwoReader', version=__version__, description='.', url='https://github.com/msimms/ZwoReader', packages=[], author='<NAME>', author_email='<EMAIL>', license='MIT', install_requires=[], python_requires='>=2.6'", "setuptools import setup, find_packages __version__ = '1.0.0' setup( name='ZwoReader', version=__version__, description='.', url='https://github.com/msimms/ZwoReader', packages=[],", "'1.0.0' setup( name='ZwoReader', version=__version__, description='.', url='https://github.com/msimms/ZwoReader', packages=[], author='<NAME>', author_email='<EMAIL>', license='MIT', install_requires=[], python_requires='>=2.6' )", "import setup, find_packages __version__ = '1.0.0' setup( name='ZwoReader', version=__version__, description='.', url='https://github.com/msimms/ZwoReader', packages=[], author='<NAME>',", "from setuptools import setup, find_packages __version__ = '1.0.0' setup( name='ZwoReader', version=__version__, description='.', url='https://github.com/msimms/ZwoReader',", "__version__ = '1.0.0' setup( name='ZwoReader', version=__version__, description='.', url='https://github.com/msimms/ZwoReader', packages=[], author='<NAME>', author_email='<EMAIL>', license='MIT', install_requires=[]," ]
[ "as e: if e.code == 503: timeOut = int(e.headers.get('retry-after', 30)) print( '503: Have", "'abstract').text.replace('\\n', ' '), 'id': info.find(ARXIV + 'id').text, 'categories': info.find(ARXIV + 'categories').text.split(), } doi", "'GetRecord').find(OAI + 'record') return prepare_record(record) def get_categories(): \"\"\"Returns a dict of all the", "= datestamp.text return result def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the OAI-api for articles submitted", "to use resumptiontoken in the next request params = {'verb': 'ListRecords', 'resumptionToken': token.text}", "rssUrl = 'http://export.arxiv.org/rss/' result = set() for category in get_categories(): print('Fetching IDs from", "will be no resumptiontoken and we can safely break token = root.find(OAI +", "if end_date: params['until'] = end_date result = {} while True: r = requests.get(base_url,", "print(msg.format(time_out)) sleep(time_out) continue # generate elementtree from responsedata root = ET.fromstring(r.text) # parse", "a dictionary structure that is easy to work with.\"\"\" if record.find(OAI + 'header').get('status',", "params = {'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date} if end_date: params['until'] = end_date", "= author.find(ARXIV + 'keyname').text a['affiliations'] = [] for affiliation in author.findall(ARXIV + 'affiliation'):", "found in the rss stream, which will be approximately the same as the", "will add a # resumptiontoken to the response, if we already have all", "articles from arXiv. To only scrape the metadata from the articles in the", "url to use resumptiontoken in the next request params = {'verb': 'ListRecords', 'resumptionToken':", "Have to wait before further requests. Retrying in {} seconds.' 
print(msg.format(time_out)) sleep(time_out) continue", "get_record(id): \"\"\"Gets metadata for a single record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching',", "'metadata').find(ARXIV + 'arXiv') result = {'title': info.find(ARXIV + 'title').text.replace('\\n', ' '), 'description': info.find(ARXIV", "+ 'journal-ref') # check that element is not None before trying to access", "if journal is not None else None authors = [] for author in", "time_out = int(r.headers.get('retry-after', 5)) msg = '503: Have to wait before further requests.", "return result def get_record(id): \"\"\"Gets metadata for a single record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv'", "+ category) for entry in feed['entries']: id = entry['link'].split('abs/')[1] result.add(id) return result def", "all the articles # there will be no resumptiontoken and we can safely", "= 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url) while True: try: response = urlopen(url) except urllib.error.HTTPError as", "= set() for category in get_categories(): print('Fetching IDs from the %s rss-feed' %", "request params = {'verb': 'ListRecords', 'resumptionToken': token.text} return result def get_record(id): \"\"\"Gets metadata", "'metadataPrefix': 'arXiv', 'from': start_date} if end_date: params['until'] = end_date result = {} while", "= record.find(OAI + 'metadata').find(ARXIV + 'arXiv') result = {'title': info.find(ARXIV + 'title').text.replace('\\n', '", "<NAME>' __copyright__ = 'Copyright 2020, The arXivDigest project' import urllib import xml.etree.ElementTree as", "info.find(ARXIV + 'abstract').text.replace('\\n', ' '), 'id': info.find(ARXIV + 'id').text, 'categories': info.find(ARXIV + 'categories').text.split(),", "add a # resumptiontoken to the response, if we already have all the", "timeOut = int(e.headers.get('retry-after', 30)) print( '503: 
Have to wait before further requests. Retrying", "url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching', url) response = urlopen(url) root = ET.fromstring(response.read())", "if firstname is None else firstname.text a['lastname'] = author.find(ARXIV + 'keyname').text a['affiliations'] =", "'resumptionToken': token.text} return result def get_record(id): \"\"\"Gets metadata for a single record.\"\"\" url", "return {} info = record.find(OAI + 'metadata').find(ARXIV + 'arXiv') result = {'title': info.find(ARXIV", "{'title': info.find(ARXIV + 'title').text.replace('\\n', ' '), 'description': info.find(ARXIV + 'abstract').text.replace('\\n', ' '), 'id':", "+ 'doi') comments = info.find(ARXIV + 'comments') licenses = info.find(ARXIV + 'license') journal", "scraping articles from arXiv. To only scrape the metadata from the articles in", "the methods related to scraping articles from arXiv. To only scrape the metadata", "import datetime import requests __author__ = '<NAME> and <NAME>' __copyright__ = 'Copyright 2020,", "not None else None authors = [] for author in info.find(ARXIV + 'authors'):", "Retrying in %d seconds.' % timeOut) sleep(timeOut) continue else: raise break root =", "= info.find(ARXIV + 'license') journal = info.find(ARXIV + 'journal-ref') # check that element", "trying to access the text result['doi'] = doi.text if doi is not None", "requests __author__ = '<NAME> and <NAME>' __copyright__ = 'Copyright 2020, The arXivDigest project'", "break token = root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken') if token is None or", "to a dictionary structure that is easy to work with.\"\"\" if record.find(OAI +", "+ 'record'): element = prepare_record(record) if element: result[element['id']] = element # If the", "{} seconds.' 
print(msg.format(time_out)) sleep(time_out) continue # generate elementtree from responsedata root = ET.fromstring(r.text)", "not None else None result['comments'] = comments.text if comments is not None else", "raise break root = ET.fromstring(response.read()) categories = root.find(OAI + 'ListSets').findall(OAI + 'set') result", "__author__ = '<NAME> and <NAME>' __copyright__ = 'Copyright 2020, The arXivDigest project' import", "urllib.request import urlopen import feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record):", "the xmlfile contains more than 1000 articles arXiv will add a # resumptiontoken", "rss-feed' % category) feed = feedparser.parse(rssUrl + category) for entry in feed['entries']: id", "= get_records_by_date(yesterday) result = {} for item in rss_ids: if item not in", "+ 'ListSets').findall(OAI + 'set') result = {} for category in categories: categoryID =", "{} for category in categories: categoryID = category.find(OAI + 'setSpec').text categoryName = category.find(OAI", "requests. Retrying in %d seconds.' 
% timeOut) sleep(timeOut) continue else: raise break root", "scrape articles between any two dates, to accomplish this use the get_records_by_date method.\"\"\"", "in any of the arXiv rss-streams.\"\"\" rss_ids = get_id_from_rss() yesterday = datetime.datetime.utcnow().date() -", "safely break token = root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken') if token is None", "if item not in articles: # download missing articles, if any element =", "from time import sleep from urllib.request import urlopen import feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}'", "firstname.text a['lastname'] = author.find(ARXIV + 'keyname').text a['affiliations'] = [] for affiliation in author.findall(ARXIV", "datestamp = record.find(OAI + 'header').find(OAI + 'datestamp') result['datestamp'] = datestamp.text return result def", "author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] = authors datestamp = record.find(OAI + 'header').find(OAI", "to the response, if we already have all the articles # there will", "use the harvestMetaDataRss method. It's also possible to scrape articles between any two", "{'verb': 'ListRecords', 'resumptionToken': token.text} return result def get_record(id): \"\"\"Gets metadata for a single", "= 'http://export.arxiv.org/rss/' result = set() for category in get_categories(): print('Fetching IDs from the", "articles in the rss-stream use the harvestMetaDataRss method. 
It's also possible to scrape", "coding: utf-8 -*- \"\"\"This module contains the the methods related to scraping articles", "None result['journal'] = journal.text if journal is not None else None authors =", "True: r = requests.get(base_url, params=params) print('Fetching', r.url) if r.status_code == 503: time_out =", "check that element is not None before trying to access the text result['doi']", "articles # there will be no resumptiontoken and we can safely break token", "= ET.fromstring(response.read()) record = root.find(OAI + 'GetRecord').find(OAI + 'record') return prepare_record(record) def get_categories():", "for author in info.find(ARXIV + 'authors'): a = {} firstname = author.find(ARXIV +", "get_id_from_rss() yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles = get_records_by_date(yesterday) result = {} for", "= record.find(OAI + 'header').find(OAI + 'datestamp') result['datestamp'] = datestamp.text return result def get_records_by_date(start_date,", "metadata for a single record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching', url) response", "comments.text if comments is not None else None result['license'] = licenses.text if licenses", "feed['entries']: id = entry['link'].split('abs/')[1] result.add(id) return result def harvest_metadata_rss(): \"\"\"This function will return", "submitted from the n previous days.\"\"\" base_url = 'http://export.arxiv.org/oai2' params = {'verb': 'ListRecords',", "for record in root.find(OAI + 'ListRecords').findall(OAI + 'record'): element = prepare_record(record) if element:", "related to scraping articles from arXiv. 
To only scrape the metadata from the", "if comments is not None else None result['license'] = licenses.text if licenses is", "ET from time import sleep from urllib.request import urlopen import feedparser OAI =", "available with info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url) while True: try: response =", "access the text result['doi'] = doi.text if doi is not None else None", "+ 'comments') licenses = info.find(ARXIV + 'license') journal = info.find(ARXIV + 'journal-ref') #", "article-ids found in the rss stream, which will be approximately the same as", "to work with.\"\"\" if record.find(OAI + 'header').get('status', None) == 'deleted': return {} info", "dict of all the main categories available with info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching',", "any element = get_record(item) result[element['id']] = element else: result[item] = articles[item] return result", "= root.find(OAI + 'GetRecord').find(OAI + 'record') return prepare_record(record) def get_categories(): \"\"\"Returns a dict", "articles arXiv will add a # resumptiontoken to the response, if we already", "if len(categoryID) > 1: categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo return result def", "None authors = [] for author in info.find(ARXIV + 'authors'): a = {}", "author in info.find(ARXIV + 'authors'): a = {} firstname = author.find(ARXIV + 'forenames')", "None before trying to access the text result['doi'] = doi.text if doi is", "+ 'header').find(OAI + 'datestamp') result['datestamp'] = datestamp.text return result def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes", "+ 'resumptionToken') if token is None or token.text is None: break # update", "xml.etree.ElementTree as ET from time import sleep from urllib.request import urlopen import feedparser", "e: if e.code == 503: timeOut = int(e.headers.get('retry-after', 30)) print( '503: Have to", "'arXiv', 'from': 
start_date} if end_date: params['until'] = end_date result = {} while True:", "== 'deleted': return {} info = record.find(OAI + 'metadata').find(ARXIV + 'arXiv') result =", "item in rss_ids: if item not in articles: # download missing articles, if", "the harvestMetaDataRss method. It's also possible to scrape articles between any two dates,", "doi is not None else None result['comments'] = comments.text if comments is not", "add it to result for record in root.find(OAI + 'ListRecords').findall(OAI + 'record'): element", "'description': info.find(ARXIV + 'abstract').text.replace('\\n', ' '), 'id': info.find(ARXIV + 'id').text, 'categories': info.find(ARXIV +", "info.find(ARXIV + 'id').text, 'categories': info.find(ARXIV + 'categories').text.split(), } doi = info.find(ARXIV + 'doi')", "categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo return result def get_id_from_rss(): \"\"\"Returns a set", "in the rss stream, which will be approximately the same as the articles", "text result['doi'] = doi.text if doi is not None else None result['comments'] =", "arXiv will add a # resumptiontoken to the response, if we already have", "approximately the same as the articles uploaded the previous day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/'", "in author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] = authors datestamp = record.find(OAI +", "is None else firstname.text a['lastname'] = author.find(ARXIV + 'keyname').text a['affiliations'] = [] for", "missing articles, if any element = get_record(item) result[element['id']] = element else: result[item] =", "a = {} firstname = author.find(ARXIV + 'forenames') a['firstname'] = '' if firstname", "of the arXiv rss-streams.\"\"\" rss_ids = get_id_from_rss() yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles", "project' import urllib import xml.etree.ElementTree as ET from time import sleep 
from urllib.request", "licenses.text if licenses is not None else None result['journal'] = journal.text if journal", "break root = ET.fromstring(response.read()) categories = root.find(OAI + 'ListSets').findall(OAI + 'set') result =", "element = prepare_record(record) if element: result[element['id']] = element # If the xmlfile contains", "uploaded the previous day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/' result = set() for category in", "[] for affiliation in author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] = authors datestamp", "None else None result['comments'] = comments.text if comments is not None else None", "len(categoryID) > 1: categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo return result def get_id_from_rss():", "30)) print( '503: Have to wait before further requests. Retrying in %d seconds.'", "methods related to scraping articles from arXiv. To only scrape the metadata from", "author.find(ARXIV + 'forenames') a['firstname'] = '' if firstname is None else firstname.text a['lastname']", "record.find(OAI + 'header').find(OAI + 'datestamp') result['datestamp'] = datestamp.text return result def get_records_by_date(start_date, end_date=None):", "None) == 'deleted': return {} info = record.find(OAI + 'metadata').find(ARXIV + 'arXiv') result", "rss-streams.\"\"\" rss_ids = get_id_from_rss() yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles = get_records_by_date(yesterday) result", "continue # generate elementtree from responsedata root = ET.fromstring(r.text) # parse the response", "'arXiv') result = {'title': info.find(ARXIV + 'title').text.replace('\\n', ' '), 'description': info.find(ARXIV + 'abstract').text.replace('\\n',", "It's also possible to scrape articles between any two dates, to accomplish this", "info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url) while 
True: try: response = urlopen(url) except", "only scrape the metadata from the articles in the rss-stream use the harvestMetaDataRss", "rss_ids: if item not in articles: # download missing articles, if any element", "already have all the articles # there will be no resumptiontoken and we", "not in articles: # download missing articles, if any element = get_record(item) result[element['id']]", "\"\"\"Formats the data to a dictionary structure that is easy to work with.\"\"\"", "category in get_categories(): print('Fetching IDs from the %s rss-feed' % category) feed =", "'license') journal = info.find(ARXIV + 'journal-ref') # check that element is not None", "previous days.\"\"\" base_url = 'http://export.arxiv.org/oai2' params = {'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date}", "single record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching', url) response = urlopen(url) root", "'categories': info.find(ARXIV + 'categories').text.split(), } doi = info.find(ARXIV + 'doi') comments = info.find(ARXIV", "in get_categories(): print('Fetching IDs from the %s rss-feed' % category) feed = feedparser.parse(rssUrl", "the arXiv rss-streams.\"\"\" rss_ids = get_id_from_rss() yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles =", "result = {} for item in rss_ids: if item not in articles: #", "end_date result = {} while True: r = requests.get(base_url, params=params) print('Fetching', r.url) if", "info.find(ARXIV + 'title').text.replace('\\n', ' '), 'description': info.find(ARXIV + 'abstract').text.replace('\\n', ' '), 'id': info.find(ARXIV", "is not None else None authors = [] for author in info.find(ARXIV +", "stream, which will be approximately the same as the articles uploaded the previous", "if record.find(OAI + 'header').get('status', None) == 'deleted': return {} info = record.find(OAI +", "'http://export.arxiv.org/oai2?verb=ListSets' 
print('fetching', url) while True: try: response = urlopen(url) except urllib.error.HTTPError as e:", "= get_id_from_rss() yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles = get_records_by_date(yesterday) result = {}", "root.find(OAI + 'GetRecord').find(OAI + 'record') return prepare_record(record) def get_categories(): \"\"\"Returns a dict of", "harvestMetaDataRss method. It's also possible to scrape articles between any two dates, to", "# there will be no resumptiontoken and we can safely break token =", "import feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats the data", "of all the main categories available with info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url)", "metadata from the articles in the rss-stream use the harvestMetaDataRss method. It's also", "resumptiontoken and we can safely break token = root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken')", "= [] for author in info.find(ARXIV + 'authors'): a = {} firstname =", "from responsedata root = ET.fromstring(r.text) # parse the response and add it to", "token = root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken') if token is None or token.text", "the the methods related to scraping articles from arXiv. To only scrape the", "= {} for category in categories: categoryID = category.find(OAI + 'setSpec').text categoryName =", "= requests.get(base_url, params=params) print('Fetching', r.url) if r.status_code == 503: time_out = int(r.headers.get('retry-after', 5))", "firstname = author.find(ARXIV + 'forenames') a['firstname'] = '' if firstname is None else", "to wait before further requests. Retrying in {} seconds.' print(msg.format(time_out)) sleep(time_out) continue #", "\"\"\"This module contains the the methods related to scraping articles from arXiv. To", "in the rss-stream use the harvestMetaDataRss method. 
It's also possible to scrape articles", "info.find(ARXIV + 'license') journal = info.find(ARXIV + 'journal-ref') # check that element is", "before further requests. Retrying in %d seconds.' % timeOut) sleep(timeOut) continue else: raise", "categoryName} categoryID = categoryID.split(':') if len(categoryID) > 1: categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]] =", "from the %s rss-feed' % category) feed = feedparser.parse(rssUrl + category) for entry", "before trying to access the text result['doi'] = doi.text if doi is not", "category) feed = feedparser.parse(rssUrl + category) for entry in feed['entries']: id = entry['link'].split('abs/')[1]", "while True: try: response = urlopen(url) except urllib.error.HTTPError as e: if e.code ==", "categoryInfo = {'name': categoryName} categoryID = categoryID.split(':') if len(categoryID) > 1: categoryInfo['masterCategory'] =", "import requests __author__ = '<NAME> and <NAME>' __copyright__ = 'Copyright 2020, The arXivDigest", "= {'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date} if end_date: params['until'] = end_date result", "'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching', url) response = urlopen(url) root = ET.fromstring(response.read()) record =", "in rss_ids: if item not in articles: # download missing articles, if any", "'deleted': return {} info = record.find(OAI + 'metadata').find(ARXIV + 'arXiv') result = {'title':", "harvest_metadata_rss(): \"\"\"This function will return the metadata from all the articles present in", "' '), 'description': info.find(ARXIV + 'abstract').text.replace('\\n', ' '), 'id': info.find(ARXIV + 'id').text, 'categories':", "authors datestamp = record.find(OAI + 'header').find(OAI + 'datestamp') result['datestamp'] = datestamp.text return result", "503: timeOut = int(e.headers.get('retry-after', 30)) print( '503: Have to wait before further requests.", "if any element = 
get_record(item) result[element['id']] = element else: result[item] = articles[item] return", "else firstname.text a['lastname'] = author.find(ARXIV + 'keyname').text a['affiliations'] = [] for affiliation in", "feedparser.parse(rssUrl + category) for entry in feed['entries']: id = entry['link'].split('abs/')[1] result.add(id) return result", "categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo return result def get_id_from_rss(): \"\"\"Returns a set of all", "'datestamp') result['datestamp'] = datestamp.text return result def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the OAI-api for", "sleep(timeOut) continue else: raise break root = ET.fromstring(response.read()) categories = root.find(OAI + 'ListSets').findall(OAI", "= '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats the data to a dictionary structure that is", "'title').text.replace('\\n', ' '), 'description': info.find(ARXIV + 'abstract').text.replace('\\n', ' '), 'id': info.find(ARXIV + 'id').text,", "present in any of the arXiv rss-streams.\"\"\" rss_ids = get_id_from_rss() yesterday = datetime.datetime.utcnow().date()", "yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles = get_records_by_date(yesterday) result = {} for item", "dictionary structure that is easy to work with.\"\"\" if record.find(OAI + 'header').get('status', None)", "= categoryID.split(':') if len(categoryID) > 1: categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo return", "urllib import xml.etree.ElementTree as ET from time import sleep from urllib.request import urlopen", "there will be no resumptiontoken and we can safely break token = root.find(OAI", "token.text is None: break # update url to use resumptiontoken in the next", "time import sleep from urllib.request import urlopen import feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV", "= {'verb': 'ListRecords', 'resumptionToken': 
token.text} return result def get_record(id): \"\"\"Gets metadata for a", "categoryName = category.find(OAI + 'setName').text categoryInfo = {'name': categoryName} categoryID = categoryID.split(':') if", "# -*- coding: utf-8 -*- \"\"\"This module contains the the methods related to", "to wait before further requests. Retrying in %d seconds.' % timeOut) sleep(timeOut) continue", "import sleep from urllib.request import urlopen import feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV =", "= categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo return result def get_id_from_rss(): \"\"\"Returns a set of", "= info.find(ARXIV + 'journal-ref') # check that element is not None before trying", "further requests. Retrying in %d seconds.' % timeOut) sleep(timeOut) continue else: raise break", "category.find(OAI + 'setName').text categoryInfo = {'name': categoryName} categoryID = categoryID.split(':') if len(categoryID) >", "same as the articles uploaded the previous day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/' result =", "if element: result[element['id']] = element # If the xmlfile contains more than 1000", "firstname is None else firstname.text a['lastname'] = author.find(ARXIV + 'keyname').text a['affiliations'] = []", "parse the response and add it to result for record in root.find(OAI +", "result for record in root.find(OAI + 'ListRecords').findall(OAI + 'record'): element = prepare_record(record) if", "int(r.headers.get('retry-after', 5)) msg = '503: Have to wait before further requests. 
Retrying in", "'header').find(OAI + 'datestamp') result['datestamp'] = datestamp.text return result def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the", "urllib.error.HTTPError as e: if e.code == 503: timeOut = int(e.headers.get('retry-after', 30)) print( '503:", "download missing articles, if any element = get_record(item) result[element['id']] = element else: result[item]", "category) for entry in feed['entries']: id = entry['link'].split('abs/')[1] result.add(id) return result def harvest_metadata_rss():", "more than 1000 articles arXiv will add a # resumptiontoken to the response,", "params=params) print('Fetching', r.url) if r.status_code == 503: time_out = int(r.headers.get('retry-after', 5)) msg =", "= {} firstname = author.find(ARXIV + 'forenames') a['firstname'] = '' if firstname is", "previous day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/' result = set() for category in get_categories(): print('Fetching", "category in categories: categoryID = category.find(OAI + 'setSpec').text categoryName = category.find(OAI + 'setName').text", "e.code == 503: timeOut = int(e.headers.get('retry-after', 30)) print( '503: Have to wait before", "+ 'categories').text.split(), } doi = info.find(ARXIV + 'doi') comments = info.find(ARXIV + 'comments')", "print('Fetching', url) response = urlopen(url) root = ET.fromstring(response.read()) record = root.find(OAI + 'GetRecord').find(OAI", "doi.text if doi is not None else None result['comments'] = comments.text if comments", "# resumptiontoken to the response, if we already have all the articles #", "info.find(ARXIV + 'doi') comments = info.find(ARXIV + 'comments') licenses = info.find(ARXIV + 'license')", "seconds.' 
% timeOut) sleep(timeOut) continue else: raise break root = ET.fromstring(response.read()) categories =", "result = set() for category in get_categories(): print('Fetching IDs from the %s rss-feed'", "get_categories(): print('Fetching IDs from the %s rss-feed' % category) feed = feedparser.parse(rssUrl +", "journal.text if journal is not None else None authors = [] for author", "resumptiontoken to the response, if we already have all the articles # there", "\"\"\"Scrapes the OAI-api for articles submitted from the n previous days.\"\"\" base_url =", "None or token.text is None: break # update url to use resumptiontoken in", "elementtree from responsedata root = ET.fromstring(r.text) # parse the response and add it", "+ 'setName').text categoryInfo = {'name': categoryName} categoryID = categoryID.split(':') if len(categoryID) > 1:", "a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] = authors datestamp = record.find(OAI + 'header').find(OAI + 'datestamp') result['datestamp']", "possible to scrape articles between any two dates, to accomplish this use the", "% category) feed = feedparser.parse(rssUrl + category) for entry in feed['entries']: id =", "'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date} if end_date: params['until'] = end_date result = {}", "+ 'setSpec').text categoryName = category.find(OAI + 'setName').text categoryInfo = {'name': categoryName} categoryID =", "ARXIV = '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats the data to a dictionary structure that", "'), 'description': info.find(ARXIV + 'abstract').text.replace('\\n', ' '), 'id': info.find(ARXIV + 'id').text, 'categories': info.find(ARXIV", "any two dates, to accomplish this use the get_records_by_date method.\"\"\" import datetime import", "id print('Fetching', url) response = urlopen(url) root = ET.fromstring(response.read()) record = root.find(OAI +", "articles = get_records_by_date(yesterday) result = {} for item in rss_ids: if 
item not", "comments = info.find(ARXIV + 'comments') licenses = info.find(ARXIV + 'license') journal = info.find(ARXIV", "result.add(id) return result def harvest_metadata_rss(): \"\"\"This function will return the metadata from all", "__copyright__ = 'Copyright 2020, The arXivDigest project' import urllib import xml.etree.ElementTree as ET", "licenses = info.find(ARXIV + 'license') journal = info.find(ARXIV + 'journal-ref') # check that", "n previous days.\"\"\" base_url = 'http://export.arxiv.org/oai2' params = {'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from':", "result def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the OAI-api for articles submitted from the n", "in the next request params = {'verb': 'ListRecords', 'resumptionToken': token.text} return result def", "def get_categories(): \"\"\"Returns a dict of all the main categories available with info.\"\"\"", "import urlopen import feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats", "503: time_out = int(r.headers.get('retry-after', 5)) msg = '503: Have to wait before further", "= feedparser.parse(rssUrl + category) for entry in feed['entries']: id = entry['link'].split('abs/')[1] result.add(id) return", "affiliation in author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] = authors datestamp = record.find(OAI", "wait before further requests. Retrying in %d seconds.' 
% timeOut) sleep(timeOut) continue else:", "+ 'set') result = {} for category in categories: categoryID = category.find(OAI +", "OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats the data to a", "result = {'title': info.find(ARXIV + 'title').text.replace('\\n', ' '), 'description': info.find(ARXIV + 'abstract').text.replace('\\n', '", "module contains the the methods related to scraping articles from arXiv. To only", "use the get_records_by_date method.\"\"\" import datetime import requests __author__ = '<NAME> and <NAME>'", "entry in feed['entries']: id = entry['link'].split('abs/')[1] result.add(id) return result def harvest_metadata_rss(): \"\"\"This function", "a['firstname'] = '' if firstname is None else firstname.text a['lastname'] = author.find(ARXIV +", "try: response = urlopen(url) except urllib.error.HTTPError as e: if e.code == 503: timeOut", "contains more than 1000 articles arXiv will add a # resumptiontoken to the", "{'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date} if end_date: params['until'] = end_date result =", "except urllib.error.HTTPError as e: if e.code == 503: timeOut = int(e.headers.get('retry-after', 30)) print(", "utf-8 -*- \"\"\"This module contains the the methods related to scraping articles from", "= '<NAME> and <NAME>' __copyright__ = 'Copyright 2020, The arXivDigest project' import urllib", "'http://export.arxiv.org/rss/' result = set() for category in get_categories(): print('Fetching IDs from the %s", "articles uploaded the previous day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/' result = set() for category", "in info.find(ARXIV + 'authors'): a = {} firstname = author.find(ARXIV + 'forenames') a['firstname']", "info.find(ARXIV + 'authors'): a = {} firstname = author.find(ARXIV + 'forenames') a['firstname'] =", "print('Fetching', r.url) if r.status_code == 503: time_out = int(r.headers.get('retry-after', 5)) msg = '503:", "result 
= {} for category in categories: categoryID = category.find(OAI + 'setSpec').text categoryName", "from urllib.request import urlopen import feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}' def", "update url to use resumptiontoken in the next request params = {'verb': 'ListRecords',", "arXivDigest project' import urllib import xml.etree.ElementTree as ET from time import sleep from", "'503: Have to wait before further requests. Retrying in {} seconds.' print(msg.format(time_out)) sleep(time_out)", "else: raise break root = ET.fromstring(response.read()) categories = root.find(OAI + 'ListSets').findall(OAI + 'set')", "between any two dates, to accomplish this use the get_records_by_date method.\"\"\" import datetime", "journal = info.find(ARXIV + 'journal-ref') # check that element is not None before", "r.status_code == 503: time_out = int(r.headers.get('retry-after', 5)) msg = '503: Have to wait", "element is not None before trying to access the text result['doi'] = doi.text", "params = {'verb': 'ListRecords', 'resumptionToken': token.text} return result def get_record(id): \"\"\"Gets metadata for", "we already have all the articles # there will be no resumptiontoken and", "params['until'] = end_date result = {} while True: r = requests.get(base_url, params=params) print('Fetching',", "print('fetching', url) while True: try: response = urlopen(url) except urllib.error.HTTPError as e: if", "the text result['doi'] = doi.text if doi is not None else None result['comments']", "result['license'] = licenses.text if licenses is not None else None result['journal'] = journal.text", "+ 'forenames') a['firstname'] = '' if firstname is None else firstname.text a['lastname'] =", "return the metadata from all the articles present in any of the arXiv", "metadata from all the articles present in any of the arXiv rss-streams.\"\"\" rss_ids", "+ 'abstract').text.replace('\\n', ' '), 'id': info.find(ARXIV + 'id').text, 'categories': 
info.find(ARXIV + 'categories').text.split(), }", "\"\"\"Gets metadata for a single record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching', url)", "a['affiliations'] = [] for affiliation in author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] =", "-*- \"\"\"This module contains the the methods related to scraping articles from arXiv.", "'{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats the data to a dictionary structure", "+ 'ListRecords').findall(OAI + 'record'): element = prepare_record(record) if element: result[element['id']] = element #", "item not in articles: # download missing articles, if any element = get_record(item)", "response = urlopen(url) root = ET.fromstring(response.read()) record = root.find(OAI + 'GetRecord').find(OAI + 'record')", "for item in rss_ids: if item not in articles: # download missing articles,", "contains the the methods related to scraping articles from arXiv. 
To only scrape", "{} while True: r = requests.get(base_url, params=params) print('Fetching', r.url) if r.status_code == 503:", "if doi is not None else None result['comments'] = comments.text if comments is", "'keyname').text a['affiliations'] = [] for affiliation in author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors']", "result['datestamp'] = datestamp.text return result def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the OAI-api for articles", "end_date: params['until'] = end_date result = {} while True: r = requests.get(base_url, params=params)", "categories = root.find(OAI + 'ListSets').findall(OAI + 'set') result = {} for category in", "ET.fromstring(response.read()) record = root.find(OAI + 'GetRecord').find(OAI + 'record') return prepare_record(record) def get_categories(): \"\"\"Returns", "= ET.fromstring(response.read()) categories = root.find(OAI + 'ListSets').findall(OAI + 'set') result = {} for", "than 1000 articles arXiv will add a # resumptiontoken to the response, if", "'forenames') a['firstname'] = '' if firstname is None else firstname.text a['lastname'] = author.find(ARXIV", "result def get_record(id): \"\"\"Gets metadata for a single record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' %", "url) while True: try: response = urlopen(url) except urllib.error.HTTPError as e: if e.code", "if e.code == 503: timeOut = int(e.headers.get('retry-after', 30)) print( '503: Have to wait", "'id').text, 'categories': info.find(ARXIV + 'categories').text.split(), } doi = info.find(ARXIV + 'doi') comments =", "return prepare_record(record) def get_categories(): \"\"\"Returns a dict of all the main categories available", "get_records_by_date(yesterday) result = {} for item in rss_ids: if item not in articles:", "= author.find(ARXIV + 'forenames') a['firstname'] = '' if firstname is None else firstname.text", "articles 
submitted from the n previous days.\"\"\" base_url = 'http://export.arxiv.org/oai2' params = {'verb':", "record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching', url) response = urlopen(url) root =", "urlopen(url) root = ET.fromstring(response.read()) record = root.find(OAI + 'GetRecord').find(OAI + 'record') return prepare_record(record)", "+ 'record') return prepare_record(record) def get_categories(): \"\"\"Returns a dict of all the main", "= prepare_record(record) if element: result[element['id']] = element # If the xmlfile contains more", "else None result['comments'] = comments.text if comments is not None else None result['license']", "prepare_record(record): \"\"\"Formats the data to a dictionary structure that is easy to work", "+ 'license') journal = info.find(ARXIV + 'journal-ref') # check that element is not", "for category in get_categories(): print('Fetching IDs from the %s rss-feed' % category) feed", "= entry['link'].split('abs/')[1] result.add(id) return result def harvest_metadata_rss(): \"\"\"This function will return the metadata", "or token.text is None: break # update url to use resumptiontoken in the", "+ 'keyname').text a['affiliations'] = [] for affiliation in author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a)", "next request params = {'verb': 'ListRecords', 'resumptionToken': token.text} return result def get_record(id): \"\"\"Gets", "'Copyright 2020, The arXivDigest project' import urllib import xml.etree.ElementTree as ET from time", "get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the OAI-api for articles submitted from the n previous days.\"\"\"", "result['comments'] = comments.text if comments is not None else None result['license'] = licenses.text", "%s rss-feed' % category) feed = feedparser.parse(rssUrl + category) for entry in feed['entries']:", "# download missing articles, if any element 
= get_record(item) result[element['id']] = element else:", "root.find(OAI + 'ListRecords').findall(OAI + 'record'): element = prepare_record(record) if element: result[element['id']] = element", "have all the articles # there will be no resumptiontoken and we can", "result def harvest_metadata_rss(): \"\"\"This function will return the metadata from all the articles", "'ListRecords').findall(OAI + 'record'): element = prepare_record(record) if element: result[element['id']] = element # If", "url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url) while True: try: response = urlopen(url) except urllib.error.HTTPError", "set() for category in get_categories(): print('Fetching IDs from the %s rss-feed' % category)", "'' if firstname is None else firstname.text a['lastname'] = author.find(ARXIV + 'keyname').text a['affiliations']", "from all the articles present in any of the arXiv rss-streams.\"\"\" rss_ids =", "not None before trying to access the text result['doi'] = doi.text if doi", "def get_id_from_rss(): \"\"\"Returns a set of all the article-ids found in the rss", "'categories').text.split(), } doi = info.find(ARXIV + 'doi') comments = info.find(ARXIV + 'comments') licenses", "if licenses is not None else None result['journal'] = journal.text if journal is", "method. It's also possible to scrape articles between any two dates, to accomplish", "response and add it to result for record in root.find(OAI + 'ListRecords').findall(OAI +", "before further requests. Retrying in {} seconds.' 
print(msg.format(time_out)) sleep(time_out) continue # generate elementtree", "= category.find(OAI + 'setName').text categoryInfo = {'name': categoryName} categoryID = categoryID.split(':') if len(categoryID)", "which will be approximately the same as the articles uploaded the previous day.\"\"\"", "the OAI-api for articles submitted from the n previous days.\"\"\" base_url = 'http://export.arxiv.org/oai2'", "function will return the metadata from all the articles present in any of", "id = entry['link'].split('abs/')[1] result.add(id) return result def harvest_metadata_rss(): \"\"\"This function will return the", "articles present in any of the arXiv rss-streams.\"\"\" rss_ids = get_id_from_rss() yesterday =", "sleep from urllib.request import urlopen import feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}'", "in categories: categoryID = category.find(OAI + 'setSpec').text categoryName = category.find(OAI + 'setName').text categoryInfo", "authors = [] for author in info.find(ARXIV + 'authors'): a = {} firstname", "accomplish this use the get_records_by_date method.\"\"\" import datetime import requests __author__ = '<NAME>", "main categories available with info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url) while True: try:", "response = urlopen(url) except urllib.error.HTTPError as e: if e.code == 503: timeOut =", "'resumptionToken') if token is None or token.text is None: break # update url", "rss_ids = get_id_from_rss() yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles = get_records_by_date(yesterday) result =", "to scraping articles from arXiv. 
To only scrape the metadata from the articles", "r.url) if r.status_code == 503: time_out = int(r.headers.get('retry-after', 5)) msg = '503: Have", "we can safely break token = root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken') if token", "= {} while True: r = requests.get(base_url, params=params) print('Fetching', r.url) if r.status_code ==", "= root.find(OAI + 'ListSets').findall(OAI + 'set') result = {} for category in categories:", "'{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats the data to a dictionary structure that is easy", "1000 articles arXiv will add a # resumptiontoken to the response, if we", "work with.\"\"\" if record.find(OAI + 'header').get('status', None) == 'deleted': return {} info =", "+ 'metadata').find(ARXIV + 'arXiv') result = {'title': info.find(ARXIV + 'title').text.replace('\\n', ' '), 'description':", "> 1: categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo return result def get_id_from_rss(): \"\"\"Returns", "end_date=None): \"\"\"Scrapes the OAI-api for articles submitted from the n previous days.\"\"\" base_url", "To only scrape the metadata from the articles in the rss-stream use the", "If the xmlfile contains more than 1000 articles arXiv will add a #", "root.find(OAI + 'ListSets').findall(OAI + 'set') result = {} for category in categories: categoryID", "base_url = 'http://export.arxiv.org/oai2' params = {'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date} if end_date:", "print('Fetching IDs from the %s rss-feed' % category) feed = feedparser.parse(rssUrl + category)", "day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/' result = set() for category in get_categories(): print('Fetching IDs", "the article-ids found in the rss stream, which will be approximately the same", "None result['comments'] = comments.text if comments is not None else None result['license'] =", "= int(e.headers.get('retry-after', 30)) print( '503: Have to wait 
before further requests. Retrying in", "that is easy to work with.\"\"\" if record.find(OAI + 'header').get('status', None) == 'deleted':", "be no resumptiontoken and we can safely break token = root.find(OAI + 'ListRecords').find(OAI", "'503: Have to wait before further requests. Retrying in %d seconds.' % timeOut)", "wait before further requests. Retrying in {} seconds.' print(msg.format(time_out)) sleep(time_out) continue # generate", "def prepare_record(record): \"\"\"Formats the data to a dictionary structure that is easy to", "info = record.find(OAI + 'metadata').find(ARXIV + 'arXiv') result = {'title': info.find(ARXIV + 'title').text.replace('\\n',", "token.text} return result def get_record(id): \"\"\"Gets metadata for a single record.\"\"\" url =", "prepare_record(record) if element: result[element['id']] = element # If the xmlfile contains more than", "def harvest_metadata_rss(): \"\"\"This function will return the metadata from all the articles present", "= int(r.headers.get('retry-after', 5)) msg = '503: Have to wait before further requests. Retrying", "will be approximately the same as the articles uploaded the previous day.\"\"\" rssUrl", "in root.find(OAI + 'ListRecords').findall(OAI + 'record'): element = prepare_record(record) if element: result[element['id']] =", "xmlfile contains more than 1000 articles arXiv will add a # resumptiontoken to", "the rss-stream use the harvestMetaDataRss method. It's also possible to scrape articles between", "response, if we already have all the articles # there will be no", "'setName').text categoryInfo = {'name': categoryName} categoryID = categoryID.split(':') if len(categoryID) > 1: categoryInfo['masterCategory']", "generate elementtree from responsedata root = ET.fromstring(r.text) # parse the response and add", "further requests. Retrying in {} seconds.' 
print(msg.format(time_out)) sleep(time_out) continue # generate elementtree from", "+ 'header').get('status', None) == 'deleted': return {} info = record.find(OAI + 'metadata').find(ARXIV +", "info.find(ARXIV + 'journal-ref') # check that element is not None before trying to", "= [] for affiliation in author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] = authors", "if r.status_code == 503: time_out = int(r.headers.get('retry-after', 5)) msg = '503: Have to", "'journal-ref') # check that element is not None before trying to access the", "'http://export.arxiv.org/oai2' params = {'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date} if end_date: params['until'] =", "no resumptiontoken and we can safely break token = root.find(OAI + 'ListRecords').find(OAI +", "root = ET.fromstring(r.text) # parse the response and add it to result for", "The arXivDigest project' import urllib import xml.etree.ElementTree as ET from time import sleep", "'set') result = {} for category in categories: categoryID = category.find(OAI + 'setSpec').text", "articles, if any element = get_record(item) result[element['id']] = element else: result[item] = articles[item]", "to scrape articles between any two dates, to accomplish this use the get_records_by_date", "categoryInfo return result def get_id_from_rss(): \"\"\"Returns a set of all the article-ids found", "this use the get_records_by_date method.\"\"\" import datetime import requests __author__ = '<NAME> and", "requests. Retrying in {} seconds.' 
print(msg.format(time_out)) sleep(time_out) continue # generate elementtree from responsedata", "'authors'): a = {} firstname = author.find(ARXIV + 'forenames') a['firstname'] = '' if", "'doi') comments = info.find(ARXIV + 'comments') licenses = info.find(ARXIV + 'license') journal =", "dates, to accomplish this use the get_records_by_date method.\"\"\" import datetime import requests __author__", "is None: break # update url to use resumptiontoken in the next request", "+ 'authors'): a = {} firstname = author.find(ARXIV + 'forenames') a['firstname'] = ''", "return result def get_id_from_rss(): \"\"\"Returns a set of all the article-ids found in", "is not None else None result['license'] = licenses.text if licenses is not None", "{} firstname = author.find(ARXIV + 'forenames') a['firstname'] = '' if firstname is None", "the articles # there will be no resumptiontoken and we can safely break", "== 503: timeOut = int(e.headers.get('retry-after', 30)) print( '503: Have to wait before further", "None: break # update url to use resumptiontoken in the next request params", "} doi = info.find(ARXIV + 'doi') comments = info.find(ARXIV + 'comments') licenses =", "for articles submitted from the n previous days.\"\"\" base_url = 'http://export.arxiv.org/oai2' params =", "# generate elementtree from responsedata root = ET.fromstring(r.text) # parse the response and", "datetime import requests __author__ = '<NAME> and <NAME>' __copyright__ = 'Copyright 2020, The", "= '503: Have to wait before further requests. Retrying in {} seconds.' 
print(msg.format(time_out))", "the response and add it to result for record in root.find(OAI + 'ListRecords').findall(OAI", "will return the metadata from all the articles present in any of the", "result['doi'] = doi.text if doi is not None else None result['comments'] = comments.text", "method.\"\"\" import datetime import requests __author__ = '<NAME> and <NAME>' __copyright__ = 'Copyright", "= 'http://export.arxiv.org/oai2' params = {'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date} if end_date: params['until']", "all the main categories available with info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url) while", "the metadata from the articles in the rss-stream use the harvestMetaDataRss method. It's", "from the articles in the rss-stream use the harvestMetaDataRss method. It's also possible", "a # resumptiontoken to the response, if we already have all the articles", "result[categoryID[-1]] = categoryInfo return result def get_id_from_rss(): \"\"\"Returns a set of all the", "the articles in the rss-stream use the harvestMetaDataRss method. 
It's also possible to", "it to result for record in root.find(OAI + 'ListRecords').findall(OAI + 'record'): element =", "is not None else None result['journal'] = journal.text if journal is not None", "result = {} while True: r = requests.get(base_url, params=params) print('Fetching', r.url) if r.status_code", "= {} for item in rss_ids: if item not in articles: # download", "datestamp.text return result def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the OAI-api for articles submitted from", "'record') return prepare_record(record) def get_categories(): \"\"\"Returns a dict of all the main categories", "use resumptiontoken in the next request params = {'verb': 'ListRecords', 'resumptionToken': token.text} return", "result['journal'] = journal.text if journal is not None else None authors = []", "entry['link'].split('abs/')[1] result.add(id) return result def harvest_metadata_rss(): \"\"\"This function will return the metadata from", "record in root.find(OAI + 'ListRecords').findall(OAI + 'record'): element = prepare_record(record) if element: result[element['id']]", "= category.find(OAI + 'setSpec').text categoryName = category.find(OAI + 'setName').text categoryInfo = {'name': categoryName}", "for entry in feed['entries']: id = entry['link'].split('abs/')[1] result.add(id) return result def harvest_metadata_rss(): \"\"\"This", "+ 'title').text.replace('\\n', ' '), 'description': info.find(ARXIV + 'abstract').text.replace('\\n', ' '), 'id': info.find(ARXIV +", "is None or token.text is None: break # update url to use resumptiontoken", "token is None or token.text is None: break # update url to use", "and <NAME>' __copyright__ = 'Copyright 2020, The arXivDigest project' import urllib import xml.etree.ElementTree", "= urlopen(url) root = ET.fromstring(response.read()) record = root.find(OAI + 'GetRecord').find(OAI + 'record') return", "the rss stream, which will be approximately the same as the articles uploaded", "rss stream, which will be 
approximately the same as the articles uploaded the", "msg = '503: Have to wait before further requests. Retrying in {} seconds.'", "start_date} if end_date: params['until'] = end_date result = {} while True: r =", "to accomplish this use the get_records_by_date method.\"\"\" import datetime import requests __author__ =", "= info.find(ARXIV + 'doi') comments = info.find(ARXIV + 'comments') licenses = info.find(ARXIV +", "'record'): element = prepare_record(record) if element: result[element['id']] = element # If the xmlfile", "the metadata from all the articles present in any of the arXiv rss-streams.\"\"\"", "{} for item in rss_ids: if item not in articles: # download missing", "if we already have all the articles # there will be no resumptiontoken", "articles between any two dates, to accomplish this use the get_records_by_date method.\"\"\" import", "'<NAME> and <NAME>' __copyright__ = 'Copyright 2020, The arXivDigest project' import urllib import", "the articles uploaded the previous day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/' result = set() for", "\"\"\"This function will return the metadata from all the articles present in any", "in %d seconds.' 
% timeOut) sleep(timeOut) continue else: raise break root = ET.fromstring(response.read())", "% id print('Fetching', url) response = urlopen(url) root = ET.fromstring(response.read()) record = root.find(OAI", "and we can safely break token = root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken') if", "IDs from the %s rss-feed' % category) feed = feedparser.parse(rssUrl + category) for", "category.find(OAI + 'setSpec').text categoryName = category.find(OAI + 'setName').text categoryInfo = {'name': categoryName} categoryID", "break # update url to use resumptiontoken in the next request params =", "+ 'id').text, 'categories': info.find(ARXIV + 'categories').text.split(), } doi = info.find(ARXIV + 'doi') comments", "categoryID = categoryID.split(':') if len(categoryID) > 1: categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo", "else None result['journal'] = journal.text if journal is not None else None authors", "= ET.fromstring(r.text) # parse the response and add it to result for record", "data to a dictionary structure that is easy to work with.\"\"\" if record.find(OAI", "and add it to result for record in root.find(OAI + 'ListRecords').findall(OAI + 'record'):", "element # If the xmlfile contains more than 1000 articles arXiv will add", "# parse the response and add it to result for record in root.find(OAI", "r = requests.get(base_url, params=params) print('Fetching', r.url) if r.status_code == 503: time_out = int(r.headers.get('retry-after',", "import xml.etree.ElementTree as ET from time import sleep from urllib.request import urlopen import", "root = ET.fromstring(response.read()) categories = root.find(OAI + 'ListSets').findall(OAI + 'set') result = {}", "not None else None result['journal'] = journal.text if journal is not None else", "'), 'id': info.find(ARXIV + 'id').text, 'categories': info.find(ARXIV + 'categories').text.split(), } doi = info.find(ARXIV", "the get_records_by_date method.\"\"\" import 
datetime import requests __author__ = '<NAME> and <NAME>' __copyright__", "authors.append(a) result['authors'] = authors datestamp = record.find(OAI + 'header').find(OAI + 'datestamp') result['datestamp'] =", "requests.get(base_url, params=params) print('Fetching', r.url) if r.status_code == 503: time_out = int(r.headers.get('retry-after', 5)) msg", "= 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching', url) response = urlopen(url) root = ET.fromstring(response.read()) record", "get_id_from_rss(): \"\"\"Returns a set of all the article-ids found in the rss stream,", "'header').get('status', None) == 'deleted': return {} info = record.find(OAI + 'metadata').find(ARXIV + 'arXiv')", "a['lastname'] = author.find(ARXIV + 'keyname').text a['affiliations'] = [] for affiliation in author.findall(ARXIV +", "the next request params = {'verb': 'ListRecords', 'resumptionToken': token.text} return result def get_record(id):", "None else None result['journal'] = journal.text if journal is not None else None", "# update url to use resumptiontoken in the next request params = {'verb':", "with info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url) while True: try: response = urlopen(url)", "from the n previous days.\"\"\" base_url = 'http://export.arxiv.org/oai2' params = {'verb': 'ListRecords', 'metadataPrefix':", "the %s rss-feed' % category) feed = feedparser.parse(rssUrl + category) for entry in", "'comments') licenses = info.find(ARXIV + 'license') journal = info.find(ARXIV + 'journal-ref') # check", "'ListSets').findall(OAI + 'set') result = {} for category in categories: categoryID = category.find(OAI", "author.find(ARXIV + 'keyname').text a['affiliations'] = [] for affiliation in author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text)", "ET.fromstring(r.text) # parse the response and add it to result for record in", "for affiliation in 
author.findall(ARXIV + 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] = authors datestamp =", "+ 'arXiv') result = {'title': info.find(ARXIV + 'title').text.replace('\\n', ' '), 'description': info.find(ARXIV +", "'ListRecords').find(OAI + 'resumptionToken') if token is None or token.text is None: break #", "if token is None or token.text is None: break # update url to", "+ 'datestamp') result['datestamp'] = datestamp.text return result def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the OAI-api", "= authors datestamp = record.find(OAI + 'header').find(OAI + 'datestamp') result['datestamp'] = datestamp.text return", "= '' if firstname is None else firstname.text a['lastname'] = author.find(ARXIV + 'keyname').text", "a single record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching', url) response = urlopen(url)", "for a single record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id print('Fetching', url) response =", "return result def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the OAI-api for articles submitted from the", "result[element['id']] = element # If the xmlfile contains more than 1000 articles arXiv", "a dict of all the main categories available with info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets'", "categories available with info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url) while True: try: response", "Retrying in {} seconds.' 
print(msg.format(time_out)) sleep(time_out) continue # generate elementtree from responsedata root", "all the articles present in any of the arXiv rss-streams.\"\"\" rss_ids = get_id_from_rss()", "is easy to work with.\"\"\" if record.find(OAI + 'header').get('status', None) == 'deleted': return", "url) response = urlopen(url) root = ET.fromstring(response.read()) record = root.find(OAI + 'GetRecord').find(OAI +", "set of all the article-ids found in the rss stream, which will be", "not None else None result['license'] = licenses.text if licenses is not None else", "+ 'GetRecord').find(OAI + 'record') return prepare_record(record) def get_categories(): \"\"\"Returns a dict of all", "rss-stream use the harvestMetaDataRss method. It's also possible to scrape articles between any", "from arXiv. To only scrape the metadata from the articles in the rss-stream", "while True: r = requests.get(base_url, params=params) print('Fetching', r.url) if r.status_code == 503: time_out", "Have to wait before further requests. Retrying in %d seconds.' 
% timeOut) sleep(timeOut)", "= comments.text if comments is not None else None result['license'] = licenses.text if", "to result for record in root.find(OAI + 'ListRecords').findall(OAI + 'record'): element = prepare_record(record)", "resumptiontoken in the next request params = {'verb': 'ListRecords', 'resumptionToken': token.text} return result", "the n previous days.\"\"\" base_url = 'http://export.arxiv.org/oai2' params = {'verb': 'ListRecords', 'metadataPrefix': 'arXiv',", "get_records_by_date method.\"\"\" import datetime import requests __author__ = '<NAME> and <NAME>' __copyright__ =", "a set of all the article-ids found in the rss stream, which will", "categories: categoryID = category.find(OAI + 'setSpec').text categoryName = category.find(OAI + 'setName').text categoryInfo =", "to access the text result['doi'] = doi.text if doi is not None else", "categoryID = category.find(OAI + 'setSpec').text categoryName = category.find(OAI + 'setName').text categoryInfo = {'name':", "doi = info.find(ARXIV + 'doi') comments = info.find(ARXIV + 'comments') licenses = info.find(ARXIV", "in feed['entries']: id = entry['link'].split('abs/')[1] result.add(id) return result def harvest_metadata_rss(): \"\"\"This function will", "else None result['license'] = licenses.text if licenses is not None else None result['journal']", "categoryID.split(':') if len(categoryID) > 1: categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo return result", "\"\"\"Returns a dict of all the main categories available with info.\"\"\" url =", "of all the article-ids found in the rss stream, which will be approximately", "result def get_id_from_rss(): \"\"\"Returns a set of all the article-ids found in the", "is not None else None result['comments'] = comments.text if comments is not None", "{} info = record.find(OAI + 'metadata').find(ARXIV + 'arXiv') result = {'title': info.find(ARXIV +", "also possible to scrape articles between any two dates, to 
accomplish this use", "'id': info.find(ARXIV + 'id').text, 'categories': info.find(ARXIV + 'categories').text.split(), } doi = info.find(ARXIV +", "feed = feedparser.parse(rssUrl + category) for entry in feed['entries']: id = entry['link'].split('abs/')[1] result.add(id)", "be approximately the same as the articles uploaded the previous day.\"\"\" rssUrl =", "= 'Copyright 2020, The arXivDigest project' import urllib import xml.etree.ElementTree as ET from", "= licenses.text if licenses is not None else None result['journal'] = journal.text if", "as ET from time import sleep from urllib.request import urlopen import feedparser OAI", "root = ET.fromstring(response.read()) record = root.find(OAI + 'GetRecord').find(OAI + 'record') return prepare_record(record) def", "scrape the metadata from the articles in the rss-stream use the harvestMetaDataRss method.", "= end_date result = {} while True: r = requests.get(base_url, params=params) print('Fetching', r.url)", "any of the arXiv rss-streams.\"\"\" rss_ids = get_id_from_rss() yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1)", "the previous day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/' result = set() for category in get_categories():", "' '), 'id': info.find(ARXIV + 'id').text, 'categories': info.find(ARXIV + 'categories').text.split(), } doi =", "record = root.find(OAI + 'GetRecord').find(OAI + 'record') return prepare_record(record) def get_categories(): \"\"\"Returns a", "\"\"\"Returns a set of all the article-ids found in the rss stream, which", "the same as the articles uploaded the previous day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/' result", "print( '503: Have to wait before further requests. Retrying in %d seconds.' 
%", "prepare_record(record) def get_categories(): \"\"\"Returns a dict of all the main categories available with", "else None authors = [] for author in info.find(ARXIV + 'authors'): a =", "= doi.text if doi is not None else None result['comments'] = comments.text if", "as the articles uploaded the previous day.\"\"\" rssUrl = 'http://export.arxiv.org/rss/' result = set()", "the response, if we already have all the articles # there will be", "result['authors'] = authors datestamp = record.find(OAI + 'header').find(OAI + 'datestamp') result['datestamp'] = datestamp.text", "= journal.text if journal is not None else None authors = [] for", "'setSpec').text categoryName = category.find(OAI + 'setName').text categoryInfo = {'name': categoryName} categoryID = categoryID.split(':')", "arXiv rss-streams.\"\"\" rss_ids = get_id_from_rss() yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles = get_records_by_date(yesterday)", "None result['license'] = licenses.text if licenses is not None else None result['journal'] =", "articles: # download missing articles, if any element = get_record(item) result[element['id']] = element", "5)) msg = '503: Have to wait before further requests. Retrying in {}", "sleep(time_out) continue # generate elementtree from responsedata root = ET.fromstring(r.text) # parse the", "two dates, to accomplish this use the get_records_by_date method.\"\"\" import datetime import requests", "timeOut) sleep(timeOut) continue else: raise break root = ET.fromstring(response.read()) categories = root.find(OAI +", "licenses is not None else None result['journal'] = journal.text if journal is not", "responsedata root = ET.fromstring(r.text) # parse the response and add it to result", "the articles present in any of the arXiv rss-streams.\"\"\" rss_ids = get_id_from_rss() yesterday", "seconds.' 
print(msg.format(time_out)) sleep(time_out) continue # generate elementtree from responsedata root = ET.fromstring(r.text) #", "None else firstname.text a['lastname'] = author.find(ARXIV + 'keyname').text a['affiliations'] = [] for affiliation", "info.find(ARXIV + 'comments') licenses = info.find(ARXIV + 'license') journal = info.find(ARXIV + 'journal-ref')", "can safely break token = root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken') if token is", "None else None result['license'] = licenses.text if licenses is not None else None", "the data to a dictionary structure that is easy to work with.\"\"\" if", "record.find(OAI + 'metadata').find(ARXIV + 'arXiv') result = {'title': info.find(ARXIV + 'title').text.replace('\\n', ' '),", "with.\"\"\" if record.find(OAI + 'header').get('status', None) == 'deleted': return {} info = record.find(OAI", "import urllib import xml.etree.ElementTree as ET from time import sleep from urllib.request import", "[] for author in info.find(ARXIV + 'authors'): a = {} firstname = author.find(ARXIV", "'from': start_date} if end_date: params['until'] = end_date result = {} while True: r", "days.\"\"\" base_url = 'http://export.arxiv.org/oai2' params = {'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date} if", "= '{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats the data to a dictionary", "continue else: raise break root = ET.fromstring(response.read()) categories = root.find(OAI + 'ListSets').findall(OAI +", "OAI-api for articles submitted from the n previous days.\"\"\" base_url = 'http://export.arxiv.org/oai2' params", "for category in categories: categoryID = category.find(OAI + 'setSpec').text categoryName = category.find(OAI +", "comments is not None else None result['license'] = licenses.text if licenses is not", "2020, The arXivDigest project' import urllib import xml.etree.ElementTree as ET from time import", "= {'title': 
info.find(ARXIV + 'title').text.replace('\\n', ' '), 'description': info.find(ARXIV + 'abstract').text.replace('\\n', ' '),", "== 503: time_out = int(r.headers.get('retry-after', 5)) msg = '503: Have to wait before", "def get_records_by_date(start_date, end_date=None): \"\"\"Scrapes the OAI-api for articles submitted from the n previous", "arXiv. To only scrape the metadata from the articles in the rss-stream use", "int(e.headers.get('retry-after', 30)) print( '503: Have to wait before further requests. Retrying in %d", "%d seconds.' % timeOut) sleep(timeOut) continue else: raise break root = ET.fromstring(response.read()) categories", "ET.fromstring(response.read()) categories = root.find(OAI + 'ListSets').findall(OAI + 'set') result = {} for category", "record.find(OAI + 'header').get('status', None) == 'deleted': return {} info = record.find(OAI + 'metadata').find(ARXIV", "= root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken') if token is None or token.text is", "datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles = get_records_by_date(yesterday) result = {} for item in rss_ids:", "return result def harvest_metadata_rss(): \"\"\"This function will return the metadata from all the", "= datetime.datetime.utcnow().date() - datetime.timedelta(days=1) articles = get_records_by_date(yesterday) result = {} for item in", "easy to work with.\"\"\" if record.find(OAI + 'header').get('status', None) == 'deleted': return {}", "info.find(ARXIV + 'categories').text.split(), } doi = info.find(ARXIV + 'doi') comments = info.find(ARXIV +", "in articles: # download missing articles, if any element = get_record(item) result[element['id']] =", "# check that element is not None before trying to access the text", "'ListRecords', 'resumptionToken': token.text} return result def get_record(id): \"\"\"Gets metadata for a single record.\"\"\"", "that element is not None before trying to access the text result['doi'] =", "- datetime.timedelta(days=1) articles = 
get_records_by_date(yesterday) result = {} for item in rss_ids: if", "% timeOut) sleep(timeOut) continue else: raise break root = ET.fromstring(response.read()) categories = root.find(OAI", "get_categories(): \"\"\"Returns a dict of all the main categories available with info.\"\"\" url", "= element # If the xmlfile contains more than 1000 articles arXiv will", "urlopen(url) except urllib.error.HTTPError as e: if e.code == 503: timeOut = int(e.headers.get('retry-after', 30))", "the main categories available with info.\"\"\" url = 'http://export.arxiv.org/oai2?verb=ListSets' print('fetching', url) while True:", "True: try: response = urlopen(url) except urllib.error.HTTPError as e: if e.code == 503:", "-*- coding: utf-8 -*- \"\"\"This module contains the the methods related to scraping", "journal is not None else None authors = [] for author in info.find(ARXIV", "{'name': categoryName} categoryID = categoryID.split(':') if len(categoryID) > 1: categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]]", "'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] = authors datestamp = record.find(OAI + 'header').find(OAI + 'datestamp')", "def get_record(id): \"\"\"Gets metadata for a single record.\"\"\" url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id", "1: categoryInfo['masterCategory'] = categoryID[0].capitalize() result[categoryID[-1]] = categoryInfo return result def get_id_from_rss(): \"\"\"Returns a", "all the article-ids found in the rss stream, which will be approximately the", "= categoryInfo return result def get_id_from_rss(): \"\"\"Returns a set of all the article-ids", "= {'name': categoryName} categoryID = categoryID.split(':') if len(categoryID) > 1: categoryInfo['masterCategory'] = categoryID[0].capitalize()", "in {} seconds.' 
print(msg.format(time_out)) sleep(time_out) continue # generate elementtree from responsedata root =", "+ 'affiliation'): a['affiliations'].append(affiliation.text) authors.append(a) result['authors'] = authors datestamp = record.find(OAI + 'header').find(OAI +", "root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken') if token is None or token.text is None:", "element: result[element['id']] = element # If the xmlfile contains more than 1000 articles", "structure that is easy to work with.\"\"\" if record.find(OAI + 'header').get('status', None) ==", "# If the xmlfile contains more than 1000 articles arXiv will add a", "feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats the data to", "datetime.timedelta(days=1) articles = get_records_by_date(yesterday) result = {} for item in rss_ids: if item", "= urlopen(url) except urllib.error.HTTPError as e: if e.code == 503: timeOut = int(e.headers.get('retry-after',", "= info.find(ARXIV + 'comments') licenses = info.find(ARXIV + 'license') journal = info.find(ARXIV +", "urlopen import feedparser OAI = '{http://www.openarchives.org/OAI/2.0/}' ARXIV = '{http://arxiv.org/OAI/arXiv/}' def prepare_record(record): \"\"\"Formats the", "is not None before trying to access the text result['doi'] = doi.text if", "None else None authors = [] for author in info.find(ARXIV + 'authors'): a", "+ 'ListRecords').find(OAI + 'resumptionToken') if token is None or token.text is None: break" ]
[ "answer = 0 while answer**2 < objetive: answer += 1 if answer**2 ==", "number: ')) answer = 0 while answer**2 < objetive: answer += 1 if", "0 while answer**2 < objetive: answer += 1 if answer**2 == objetive: print(f'Square", "== objetive: print(f'Square root of {objetive} is {answer}') else: print(f'{objetive} it doesn´t have", "root of {objetive} is {answer}') else: print(f'{objetive} it doesn´t have square root exact')", "answer**2 == objetive: print(f'Square root of {objetive} is {answer}') else: print(f'{objetive} it doesn´t", "a whole number: ')) answer = 0 while answer**2 < objetive: answer +=", "= 0 while answer**2 < objetive: answer += 1 if answer**2 == objetive:", "while answer**2 < objetive: answer += 1 if answer**2 == objetive: print(f'Square root", "+= 1 if answer**2 == objetive: print(f'Square root of {objetive} is {answer}') else:", "int(input('Choose a whole number: ')) answer = 0 while answer**2 < objetive: answer", "print(f'Square root of {objetive} is {answer}') else: print(f'{objetive} it doesn´t have square root", "answer += 1 if answer**2 == objetive: print(f'Square root of {objetive} is {answer}')", "')) answer = 0 while answer**2 < objetive: answer += 1 if answer**2", "= int(input('Choose a whole number: ')) answer = 0 while answer**2 < objetive:", "whole number: ')) answer = 0 while answer**2 < objetive: answer += 1", "objetive = int(input('Choose a whole number: ')) answer = 0 while answer**2 <", "answer**2 < objetive: answer += 1 if answer**2 == objetive: print(f'Square root of", "1 if answer**2 == objetive: print(f'Square root of {objetive} is {answer}') else: print(f'{objetive}", "objetive: print(f'Square root of {objetive} is {answer}') else: print(f'{objetive} it doesn´t have square", "objetive: answer += 1 if answer**2 == objetive: print(f'Square root of {objetive} is", "< objetive: answer += 1 if answer**2 == objetive: print(f'Square root of {objetive}", "if answer**2 == objetive: print(f'Square root of {objetive} is {answer}') 
else: print(f'{objetive} it" ]
[ "queries = pd.read_csv('dataset-train/train-queries.csv', sep=';')[['queryId', 'items', 'is.test']] print('Total queries', len(queries)) # Leaving only test", "part; it usually takes around 5 minutes. print('Sorting items per query by popularity...')", "np.array(items)[np.array(items_scores).argsort()] # Squashing items together s = ','.join(sorted_items) # and writing them to", "= pd.read_csv('dataset-train/train-purchases.csv', sep=';')[['itemId']] print('Purchases', len(purchases)) # Calculating popularity as [Amount of views] *", "# Fancy progressbar if i % step == 0: print(5 * i /", "purchases; taking itemId column purchases = pd.read_csv('dataset-train/train-purchases.csv', sep=';')[['itemId']] print('Purchases', len(purchases)) # Calculating popularity", "<dataset-train/> queries = pd.read_csv('dataset-train/train-queries.csv', sep=';')[['queryId', 'items', 'is.test']] print('Total queries', len(queries)) # Leaving only", "# by <NAME> & <NAME> import numpy as np import pandas as pd", "we can use argsort items_scores = list(map(lambda x: -prod_pop.get(x, 0), items)) # Sorting", "clicks * 2 + [Amount of purchases] * 3 print('Scoring popularity for each", "datetime.datetime.now() print(\"Done. 
Now it's \", end_time.isoformat()) print(\"Calculated baseline in \", (end_time - start_time).seconds,", "<NAME> import numpy as np import pandas as pd import datetime start_time =", "product not in prod_pop: prod_pop[product] = cost else: prod_pop[product] += cost print('Popularity scored", "2 + [Amount of purchases] * 3 print('Scoring popularity for each item ...')", "print('Test queries', len(queries)) queries.reset_index(inplace=True) queries.drop(['index'], axis=1, inplace=True) # Loading item views; taking itemId", "cost print('Popularity scored for', len(prod_pop), 'products') # For each query: # parse items", "taking itemId column clicks = pd.read_csv('dataset-train/train-clicks.csv', sep=';')[['itemId']] print('Clicks', len(clicks)) # Loading purchases; taking", "itemId column clicks = pd.read_csv('dataset-train/train-clicks.csv', sep=';')[['itemId']] print('Clicks', len(clicks)) # Loading purchases; taking itemId", "per query by popularity...') answers = [] step = int(len(queries) / 20) with", "Fancy progressbar if i % step == 0: print(5 * i / step,", "queries[queries['is.test'] == True][['queryId', 'items']] print('Test queries', len(queries)) queries.reset_index(inplace=True) queries.drop(['index'], axis=1, inplace=True) # Loading", "Calculating popularity as [Amount of views] * 1 + Amount of clicks *", "Also, inverting scores here, so we can use argsort items_scores = list(map(lambda x:", "Loading queries (assuming data placed in <dataset-train/> queries = pd.read_csv('dataset-train/train-queries.csv', sep=';')[['queryId', 'items', 'is.test']]", "queries = queries[queries['is.test'] == True][['queryId', 'items']] print('Test queries', len(queries)) queries.reset_index(inplace=True) queries.drop(['index'], axis=1, inplace=True)", "= {} for cost, container in enumerate([item_views, clicks, purchases]): for prod in container.values:", "last column) # sort them by score; # write them to the submission", "0: print(5 * i / step, '%...') # Splitting last 
column which contains", "0), items)) # Sorting items using items_scores order permutation sorted_items = np.array(items)[np.array(items_scores).argsort()] #", "popularity for each item ...') prod_pop = {} for cost, container in enumerate([item_views,", "items using items_scores order permutation sorted_items = np.array(items)[np.array(items_scores).argsort()] # Squashing items together s", "together s = ','.join(sorted_items) # and writing them to submission submission.write(str(q[0]) + \"", "list(map(lambda x: -prod_pop.get(x, 0), items)) # Sorting items using items_scores order permutation sorted_items", "== True][['queryId', 'items']] print('Test queries', len(queries)) queries.reset_index(inplace=True) queries.drop(['index'], axis=1, inplace=True) # Loading item", "taking itemId column purchases = pd.read_csv('dataset-train/train-purchases.csv', sep=';')[['itemId']] print('Purchases', len(purchases)) # Calculating popularity as", "# Leaving only test queries (the ones which items we have to sort)", "queries', len(queries)) # Leaving only test queries (the ones which items we have", "item_views = pd.read_csv('dataset-train/train-item-views.csv', sep=';')[['itemId']] print('Item views', len(item_views)) # Loading clicks; taking itemId column", "2016 # by <NAME> & <NAME> import numpy as np import pandas as", "-prod_pop.get(x, 0), items)) # Sorting items using items_scores order permutation sorted_items = np.array(items)[np.array(items_scores).argsort()]", "prod in container.values: product = str(prod[0]) if product not in prod_pop: prod_pop[product] =", "as pd import datetime start_time = datetime.datetime.now() print(\"Running baseline. 
Now it's\", start_time.isoformat()) #", "views', len(item_views)) # Loading clicks; taking itemId column clicks = pd.read_csv('dataset-train/train-clicks.csv', sep=';')[['itemId']] print('Clicks',", "to sort) queries = queries[queries['is.test'] == True][['queryId', 'items']] print('Test queries', len(queries)) queries.reset_index(inplace=True) queries.drop(['index'],", "numpy as np import pandas as pd import datetime start_time = datetime.datetime.now() print(\"Running", "datetime start_time = datetime.datetime.now() print(\"Running baseline. Now it's\", start_time.isoformat()) # Loading queries (assuming", "\" \" + s + \"\\n\") end_time = datetime.datetime.now() print(\"Done. Now it's \",", "+= cost print('Popularity scored for', len(prod_pop), 'products') # For each query: # parse", "= pd.read_csv('dataset-train/train-clicks.csv', sep=';')[['itemId']] print('Clicks', len(clicks)) # Loading purchases; taking itemId column purchases =", "for each item. Also, inverting scores here, so we can use argsort items_scores", "Cup 2016 # by <NAME> & <NAME> import numpy as np import pandas", "[Amount of purchases] * 3 print('Scoring popularity for each item ...') prod_pop =", "Loading purchases; taking itemId column purchases = pd.read_csv('dataset-train/train-purchases.csv', sep=';')[['itemId']] print('Purchases', len(purchases)) # Calculating", "len(purchases)) # Calculating popularity as [Amount of views] * 1 + Amount of", "is longest part; it usually takes around 5 minutes. print('Sorting items per query", "to submission submission.write(str(q[0]) + \" \" + s + \"\\n\") end_time = datetime.datetime.now()", "print('Popularity scored for', len(prod_pop), 'products') # For each query: # parse items (comma-separated", "takes around 5 minutes. print('Sorting items per query by popularity...') answers = []", "pandas as pd import datetime start_time = datetime.datetime.now() print(\"Running baseline. 
Now it's\", start_time.isoformat())", "in container.values: product = str(prod[0]) if product not in prod_pop: prod_pop[product] = cost", "item ...') prod_pop = {} for cost, container in enumerate([item_views, clicks, purchases]): for", "as [Amount of views] * 1 + Amount of clicks * 2 +", "print('Scoring popularity for each item ...') prod_pop = {} for cost, container in", "scored for', len(prod_pop), 'products') # For each query: # parse items (comma-separated values", "1 + Amount of clicks * 2 + [Amount of purchases] * 3", "...') prod_pop = {} for cost, container in enumerate([item_views, clicks, purchases]): for prod", "# This is sample baseline for CIKM Personalization Cup 2016 # by <NAME>", "for i, q in enumerate(queries.values): # Fancy progressbar if i % step ==", "them by score; # write them to the submission file. # This is", "sample baseline for CIKM Personalization Cup 2016 # by <NAME> & <NAME> import", "it's\", start_time.isoformat()) # Loading queries (assuming data placed in <dataset-train/> queries = pd.read_csv('dataset-train/train-queries.csv',", "ones which items we have to sort) queries = queries[queries['is.test'] == True][['queryId', 'items']]", "in prod_pop: prod_pop[product] = cost else: prod_pop[product] += cost print('Popularity scored for', len(prod_pop),", "order permutation sorted_items = np.array(items)[np.array(items_scores).argsort()] # Squashing items together s = ','.join(sorted_items) #", "which contains comma-separated items items = q[-1].split(',') # Getting scores for each item.", "items items = q[-1].split(',') # Getting scores for each item. 
Also, inverting scores", "<NAME> & <NAME> import numpy as np import pandas as pd import datetime", "# Loading item views; taking itemId column item_views = pd.read_csv('dataset-train/train-item-views.csv', sep=';')[['itemId']] print('Item views',", "sep=';')[['itemId']] print('Purchases', len(purchases)) # Calculating popularity as [Amount of views] * 1 +", "if product not in prod_pop: prod_pop[product] = cost else: prod_pop[product] += cost print('Popularity", "Squashing items together s = ','.join(sorted_items) # and writing them to submission submission.write(str(q[0])", "print('Sorting items per query by popularity...') answers = [] step = int(len(queries) /", "Loading item views; taking itemId column item_views = pd.read_csv('dataset-train/train-item-views.csv', sep=';')[['itemId']] print('Item views', len(item_views))", "score; # write them to the submission file. # This is longest part;", "Splitting last column which contains comma-separated items items = q[-1].split(',') # Getting scores", "enumerate(queries.values): # Fancy progressbar if i % step == 0: print(5 * i", "Amount of clicks * 2 + [Amount of purchases] * 3 print('Scoring popularity", "print('Purchases', len(purchases)) # Calculating popularity as [Amount of views] * 1 + Amount", "clicks, purchases]): for prod in container.values: product = str(prod[0]) if product not in", "int(len(queries) / 20) with open('submission.txt', 'w+') as submission: for i, q in enumerate(queries.values):", "in <dataset-train/> queries = pd.read_csv('dataset-train/train-queries.csv', sep=';')[['queryId', 'items', 'is.test']] print('Total queries', len(queries)) # Leaving", "{} for cost, container in enumerate([item_views, clicks, purchases]): for prod in container.values: product", "enumerate([item_views, clicks, purchases]): for prod in container.values: product = str(prod[0]) if product not", "3 print('Scoring popularity for each item ...') prod_pop = {} for cost, container", "inverting scores here, so we can use 
argsort items_scores = list(map(lambda x: -prod_pop.get(x,", "file. # This is longest part; it usually takes around 5 minutes. print('Sorting", "submission.write(str(q[0]) + \" \" + s + \"\\n\") end_time = datetime.datetime.now() print(\"Done. Now", "the submission file. # This is longest part; it usually takes around 5", "'items']] print('Test queries', len(queries)) queries.reset_index(inplace=True) queries.drop(['index'], axis=1, inplace=True) # Loading item views; taking", "query: # parse items (comma-separated values in last column) # sort them by", "& <NAME> import numpy as np import pandas as pd import datetime start_time", "column which contains comma-separated items items = q[-1].split(',') # Getting scores for each", "for', len(prod_pop), 'products') # For each query: # parse items (comma-separated values in", "import pandas as pd import datetime start_time = datetime.datetime.now() print(\"Running baseline. Now it's\",", "progressbar if i % step == 0: print(5 * i / step, '%...')", "popularity as [Amount of views] * 1 + Amount of clicks * 2", "data placed in <dataset-train/> queries = pd.read_csv('dataset-train/train-queries.csv', sep=';')[['queryId', 'items', 'is.test']] print('Total queries', len(queries))", "Loading clicks; taking itemId column clicks = pd.read_csv('dataset-train/train-clicks.csv', sep=';')[['itemId']] print('Clicks', len(clicks)) # Loading", "Sorting items using items_scores order permutation sorted_items = np.array(items)[np.array(items_scores).argsort()] # Squashing items together", "pd import datetime start_time = datetime.datetime.now() print(\"Running baseline. Now it's\", start_time.isoformat()) # Loading", "them to the submission file. 
# This is longest part; it usually takes", "views; taking itemId column item_views = pd.read_csv('dataset-train/train-item-views.csv', sep=';')[['itemId']] print('Item views', len(item_views)) # Loading", "in enumerate(queries.values): # Fancy progressbar if i % step == 0: print(5 *", "[] step = int(len(queries) / 20) with open('submission.txt', 'w+') as submission: for i,", "as np import pandas as pd import datetime start_time = datetime.datetime.now() print(\"Running baseline.", "items per query by popularity...') answers = [] step = int(len(queries) / 20)", "This is longest part; it usually takes around 5 minutes. print('Sorting items per", "= list(map(lambda x: -prod_pop.get(x, 0), items)) # Sorting items using items_scores order permutation", "scores for each item. Also, inverting scores here, so we can use argsort", "s = ','.join(sorted_items) # and writing them to submission submission.write(str(q[0]) + \" \"", "i, q in enumerate(queries.values): # Fancy progressbar if i % step == 0:", "write them to the submission file. # This is longest part; it usually", "'w+') as submission: for i, q in enumerate(queries.values): # Fancy progressbar if i", "submission file. # This is longest part; it usually takes around 5 minutes.", "use argsort items_scores = list(map(lambda x: -prod_pop.get(x, 0), items)) # Sorting items using", "# write them to the submission file. # This is longest part; it", "% step == 0: print(5 * i / step, '%...') # Splitting last", "product = str(prod[0]) if product not in prod_pop: prod_pop[product] = cost else: prod_pop[product]", "inplace=True) # Loading item views; taking itemId column item_views = pd.read_csv('dataset-train/train-item-views.csv', sep=';')[['itemId']] print('Item", "items together s = ','.join(sorted_items) # and writing them to submission submission.write(str(q[0]) +", "+ \" \" + s + \"\\n\") end_time = datetime.datetime.now() print(\"Done. Now it's", "if i % step == 0: print(5 * i / step, '%...') #", "item. 
# This is sample baseline for CIKM Personalization Cup 2016
# by <NAME> & <NAME>
import datetime

import numpy as np
import pandas as pd

start_time = datetime.datetime.now()
print("Running baseline. Now it's", start_time.isoformat())

# Loading queries (assuming data placed in <dataset-train/>)
queries = pd.read_csv('dataset-train/train-queries.csv', sep=';')[['queryId', 'items', 'is.test']]
print('Total queries', len(queries))

# Leaving only test queries (the ones which items we have to sort)
queries = queries[queries['is.test'] == True][['queryId', 'items']]
print('Test queries', len(queries))
# Re-number rows 0..N-1 and discard the old index in one step
# (equivalent to reset_index() followed by dropping the 'index' column).
queries.reset_index(drop=True, inplace=True)

# Loading item views; taking itemId column
item_views = pd.read_csv('dataset-train/train-item-views.csv', sep=';')[['itemId']]
print('Item views', len(item_views))

# Loading clicks; taking itemId column
clicks = pd.read_csv('dataset-train/train-clicks.csv', sep=';')[['itemId']]
print('Clicks', len(clicks))

# Loading purchases; taking itemId column
purchases = pd.read_csv('dataset-train/train-purchases.csv', sep=';')[['itemId']]
print('Purchases', len(purchases))

# Calculating popularity as [Amount of views] * 1 + [Amount of clicks] * 2 + [Amount of purchases] * 3
print('Scoring popularity for each item ...')
prod_pop = {}
# BUG FIX: enumerate() starts at 0 by default, which gave views a weight of
# zero (so views never contributed to the score) and clicks/purchases weights
# of 1/2 instead of the documented 1/2/3. Start counting at 1 to match the
# weighting described above.
for cost, container in enumerate([item_views, clicks, purchases], start=1):
    for prod in container.values:
        product = str(prod[0])
        prod_pop[product] = prod_pop.get(product, 0) + cost
print('Popularity scored for', len(prod_pop), 'products')

# For each query:
#   parse items (comma-separated values in last column)
#   sort them by score;
#   write them to the submission file.
# This is longest part; it usually takes around 5 minutes.
print('Sorting items per query by popularity...')
# Guard against ZeroDivisionError when there are fewer than 20 queries.
step = max(1, int(len(queries) / 20))
with open('submission.txt', 'w+') as submission:
    for i, q in enumerate(queries.values):
        # Fancy progressbar
        if i % step == 0:
            print(5 * i / step, '%...')
        # Splitting last column which contains comma-separated items
        items = q[-1].split(',')
        # Getting scores for each item. Also, inverting scores here,
        # so we can use argsort (argsort is ascending; negated scores
        # put the most popular items first).
        items_scores = [-prod_pop.get(x, 0) for x in items]
        # Sorting items using items_scores order permutation
        sorted_items = np.array(items)[np.array(items_scores).argsort()]
        # Squashing items together
        s = ','.join(sorted_items)
        # and writing them to submission
        submission.write(str(q[0]) + " " + s + "\n")

end_time = datetime.datetime.now()
print("Done. Now it's ", end_time.isoformat())
print("Calculated baseline in ", (end_time - start_time).seconds, " seconds")
[ "-> torch.Tensor: \"\"\"KL divergence between categorical and categorical, KL(p||q). Args: p (torch.distributions.Distribution): PyTorch", "divergence = torch.sum(divergence, dim=self.dim) return divergence, x_dict dim_list = list(torch.arange(divergence.dim())) divergence = torch.sum(divergence,", "torch from torch._six import inf from pixyz.distributions import Distribution from pixyz.losses.losses import Loss", "Args: p (torch.distributions.Distribution): PyTorch Distribution class. q (torch.distributions.Distribution): PyTorch Distribution class. Returns: t", "dim: Optional[int] = None): self.dim = dim super().__init__(p, q, input_var) @property def _symbol(self):", "dim super().__init__(p, q, input_var) @property def _symbol(self): return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text))", "def _get_eval(self, x_dict: Dict[str, torch.Tensor], **kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if (not hasattr(self.p,", "torch._six import inf from pixyz.distributions import Distribution from pixyz.losses.losses import Loss from pixyz.utils", "(pixyz.distributions.distributions.Distribution): Distribution class. q (pixyz.distributions.distributions.Distribution): Distribution class. input_var (list, optional): Input variable name.", "p (torch.distributions.Distribution): PyTorch Distribution class. q (torch.distributions.Distribution): PyTorch Distribution class. Returns: t (torch.Tensor):", "from pixyz.utils import get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution ) -> torch.Tensor: \"\"\"KL", "= 0 return t.sum(-1) class CategoricalKullbackLeibler(Loss): \"\"\"Kullback Leibler divergence for categorical distributions. Args:", "PyTorch Distribution class. Returns: t (torch.Tensor): Calculated KL divergence. \"\"\" t = p.probs", "= p.probs * (p.logits - q.logits) t[(q.probs == 0).expand_as(t)] = inf t[(p.probs ==", "dimension. 
\"\"\" def __init__(self, p: Distribution, q: Distribution, input_var: Optional[List[str]] = None, dim:", "two distributions \" \"cannot be evaluated, got %s and %s.\" % (self.p.distribution_name, self.q.distribution_name))", "0).expand_as(t)] = 0 return t.sum(-1) class CategoricalKullbackLeibler(Loss): \"\"\"Kullback Leibler divergence for categorical distributions.", "Distribution from pixyz.losses.losses import Loss from pixyz.utils import get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution, q:", "and RelaxedCategorical ref) KL divergence in PyTorch https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence \"\"\" from typing import Optional,", "Distribution, q: Distribution, input_var: Optional[List[str]] = None, dim: Optional[int] = None): self.dim =", "Optional[List[str]] = None, dim: Optional[int] = None): self.dim = dim super().__init__(p, q, input_var)", "pixyz.losses.losses import Loss from pixyz.utils import get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution )", "True) self.p.set_dist(input_dict) input_dict = get_dict_values(x_dict, self.q.input_var, True) self.q.set_dist(input_dict) divergence = _kl_categorical_categorical(self.p.dist, self.q.dist) if", "0).expand_as(t)] = inf t[(p.probs == 0).expand_as(t)] = 0 return t.sum(-1) class CategoricalKullbackLeibler(Loss): \"\"\"Kullback", "Calculated KL divergence. \"\"\" t = p.probs * (p.logits - q.logits) t[(q.probs ==", "Returns: t (torch.Tensor): Calculated KL divergence. \"\"\" t = p.probs * (p.logits -", "= None): self.dim = dim super().__init__(p, q, input_var) @property def _symbol(self): return sympy.Symbol(\"D_{{KL}}", "ValueError(\"Divergence between these two distributions \" \"cannot be evaluated, got %s and %s.\"", "for categorical distributions. Args: p (pixyz.distributions.distributions.Distribution): Distribution class. 
q (pixyz.distributions.distributions.Distribution): Distribution class. input_var", "(self.p.distribution_name, self.q.distribution_name)) input_dict = get_dict_values(x_dict, self.p.input_var, True) self.p.set_dist(input_dict) input_dict = get_dict_values(x_dict, self.q.input_var, True)", "variable name. dim (int, optional): Aggregate dimension. \"\"\" def __init__(self, p: Distribution, q:", "torch.distributions.Distribution, q: torch.distributions.Distribution ) -> torch.Tensor: \"\"\"KL divergence between categorical and categorical, KL(p||q).", "Input variable name. dim (int, optional): Aggregate dimension. \"\"\" def __init__(self, p: Distribution,", "p: Distribution, q: Distribution, input_var: Optional[List[str]] = None, dim: Optional[int] = None): self.dim", "hasattr(self.p, 'distribution_torch_class')) \\ or (not hasattr(self.q, 'distribution_torch_class')): raise ValueError(\"Divergence between these two distributions", "p (pixyz.distributions.distributions.Distribution): Distribution class. q (pixyz.distributions.distributions.Distribution): Distribution class. input_var (list, optional): Input variable", ") -> torch.Tensor: \"\"\"KL divergence between categorical and categorical, KL(p||q). Args: p (torch.distributions.Distribution):", "import inf from pixyz.distributions import Distribution from pixyz.losses.losses import Loss from pixyz.utils import", "= _kl_categorical_categorical(self.p.dist, self.q.dist) if self.dim is not None: divergence = torch.sum(divergence, dim=self.dim) return", "typing import Optional, List, Dict, Tuple import sympy import torch from torch._six import", "p.probs * (p.logits - q.logits) t[(q.probs == 0).expand_as(t)] = inf t[(p.probs == 0).expand_as(t)]", "q (pixyz.distributions.distributions.Distribution): Distribution class. input_var (list, optional): Input variable name. 
dim (int, optional):", "from pixyz.distributions import Distribution from pixyz.losses.losses import Loss from pixyz.utils import get_dict_values def", "'distribution_torch_class')): raise ValueError(\"Divergence between these two distributions \" \"cannot be evaluated, got %s", "Tuple import sympy import torch from torch._six import inf from pixyz.distributions import Distribution", "KL divergence. \"\"\" t = p.probs * (p.logits - q.logits) t[(q.probs == 0).expand_as(t)]", "dim (int, optional): Aggregate dimension. \"\"\" def __init__(self, p: Distribution, q: Distribution, input_var:", "(torch.distributions.Distribution): PyTorch Distribution class. q (torch.distributions.Distribution): PyTorch Distribution class. Returns: t (torch.Tensor): Calculated", "distributions \" \"cannot be evaluated, got %s and %s.\" % (self.p.distribution_name, self.q.distribution_name)) input_dict", "== 0).expand_as(t)] = inf t[(p.probs == 0).expand_as(t)] = 0 return t.sum(-1) class CategoricalKullbackLeibler(Loss):", "Dict[str, torch.Tensor]]: if (not hasattr(self.p, 'distribution_torch_class')) \\ or (not hasattr(self.q, 'distribution_torch_class')): raise ValueError(\"Divergence", "\\ or (not hasattr(self.q, 'distribution_torch_class')): raise ValueError(\"Divergence between these two distributions \" \"cannot", "between these two distributions \" \"cannot be evaluated, got %s and %s.\" %", "= inf t[(p.probs == 0).expand_as(t)] = 0 return t.sum(-1) class CategoricalKullbackLeibler(Loss): \"\"\"Kullback Leibler", "sympy import torch from torch._six import inf from pixyz.distributions import Distribution from pixyz.losses.losses", "divergence KL loss for Categorical and RelaxedCategorical ref) KL divergence in PyTorch https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence", "Optional, List, Dict, Tuple import sympy import torch from torch._six import inf from", "t[(p.probs == 0).expand_as(t)] = 0 return t.sum(-1) class CategoricalKullbackLeibler(Loss): 
\"\"\"Kullback Leibler divergence for", "\" \"cannot be evaluated, got %s and %s.\" % (self.p.distribution_name, self.q.distribution_name)) input_dict =", "self.dim = dim super().__init__(p, q, input_var) @property def _symbol(self): return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format(", "from torch._six import inf from pixyz.distributions import Distribution from pixyz.losses.losses import Loss from", "RelaxedCategorical ref) KL divergence in PyTorch https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence \"\"\" from typing import Optional, List,", "t[(q.probs == 0).expand_as(t)] = inf t[(p.probs == 0).expand_as(t)] = 0 return t.sum(-1) class", "KL loss for Categorical and RelaxedCategorical ref) KL divergence in PyTorch https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence \"\"\"", "\"\"\"Kullback Leibler divergence for categorical distributions. Args: p (pixyz.distributions.distributions.Distribution): Distribution class. q (pixyz.distributions.distributions.Distribution):", "categorical, KL(p||q). Args: p (torch.distributions.Distribution): PyTorch Distribution class. q (torch.distributions.Distribution): PyTorch Distribution class.", "get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution ) -> torch.Tensor: \"\"\"KL divergence between categorical", "Args: p (pixyz.distributions.distributions.Distribution): Distribution class. q (pixyz.distributions.distributions.Distribution): Distribution class. input_var (list, optional): Input", "= None, dim: Optional[int] = None): self.dim = dim super().__init__(p, q, input_var) @property", "optional): Aggregate dimension. \"\"\" def __init__(self, p: Distribution, q: Distribution, input_var: Optional[List[str]] =", "(pixyz.distributions.distributions.Distribution): Distribution class. input_var (list, optional): Input variable name. 
dim (int, optional): Aggregate", "%s and %s.\" % (self.p.distribution_name, self.q.distribution_name)) input_dict = get_dict_values(x_dict, self.p.input_var, True) self.p.set_dist(input_dict) input_dict", "= torch.sum(divergence, dim=self.dim) return divergence, x_dict dim_list = list(torch.arange(divergence.dim())) divergence = torch.sum(divergence, dim=dim_list[1:])", "loss for Categorical and RelaxedCategorical ref) KL divergence in PyTorch https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence \"\"\" from", "distributions. Args: p (pixyz.distributions.distributions.Distribution): Distribution class. q (pixyz.distributions.distributions.Distribution): Distribution class. input_var (list, optional):", "'distribution_torch_class')) \\ or (not hasattr(self.q, 'distribution_torch_class')): raise ValueError(\"Divergence between these two distributions \"", "input_var: Optional[List[str]] = None, dim: Optional[int] = None): self.dim = dim super().__init__(p, q,", "not None: divergence = torch.sum(divergence, dim=self.dim) return divergence, x_dict dim_list = list(torch.arange(divergence.dim())) divergence", "torch.sum(divergence, dim=self.dim) return divergence, x_dict dim_list = list(torch.arange(divergence.dim())) divergence = torch.sum(divergence, dim=dim_list[1:]) return", "@property def _symbol(self): return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text)) def _get_eval(self, x_dict: Dict[str,", "import Distribution from pixyz.losses.losses import Loss from pixyz.utils import get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution,", "CategoricalKullbackLeibler(Loss): \"\"\"Kullback Leibler divergence for categorical distributions. Args: p (pixyz.distributions.distributions.Distribution): Distribution class. q", "PyTorch Distribution class. q (torch.distributions.Distribution): PyTorch Distribution class. 
Returns: t (torch.Tensor): Calculated KL", "input_var (list, optional): Input variable name. dim (int, optional): Aggregate dimension. \"\"\" def", "self.p.input_var, True) self.p.set_dist(input_dict) input_dict = get_dict_values(x_dict, self.q.input_var, True) self.q.set_dist(input_dict) divergence = _kl_categorical_categorical(self.p.dist, self.q.dist)", "% (self.p.distribution_name, self.q.distribution_name)) input_dict = get_dict_values(x_dict, self.p.input_var, True) self.p.set_dist(input_dict) input_dict = get_dict_values(x_dict, self.q.input_var,", "_kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution ) -> torch.Tensor: \"\"\"KL divergence between categorical and categorical,", "_kl_categorical_categorical(self.p.dist, self.q.dist) if self.dim is not None: divergence = torch.sum(divergence, dim=self.dim) return divergence,", "evaluated, got %s and %s.\" % (self.p.distribution_name, self.q.distribution_name)) input_dict = get_dict_values(x_dict, self.p.input_var, True)", "pixyz.utils import get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution ) -> torch.Tensor: \"\"\"KL divergence", "self.q.dist) if self.dim is not None: divergence = torch.sum(divergence, dim=self.dim) return divergence, x_dict", "= get_dict_values(x_dict, self.p.input_var, True) self.p.set_dist(input_dict) input_dict = get_dict_values(x_dict, self.q.input_var, True) self.q.set_dist(input_dict) divergence =", "self.q.prob_text)) def _get_eval(self, x_dict: Dict[str, torch.Tensor], **kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if (not", "import get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution ) -> torch.Tensor: \"\"\"KL divergence between", "(p.logits - q.logits) t[(q.probs == 0).expand_as(t)] = inf t[(p.probs == 0).expand_as(t)] = 0", "be evaluated, got %s and %s.\" % (self.p.distribution_name, 
class CategoricalKullbackLeibler(Loss):
    """Kullback Leibler divergence for categorical distributions.

    Args:
        p (pixyz.distributions.distributions.Distribution): Distribution class.
        q (pixyz.distributions.distributions.Distribution): Distribution class.
        input_var (list, optional): Input variable name.
        dim (int, optional): Aggregate dimension.
    """

    def __init__(self, p: Distribution, q: Distribution,
                 input_var: Optional[List[str]] = None,
                 dim: Optional[int] = None):
        # Dimension to sum the divergence over; None means "sum over every
        # dimension except the first" (see _get_eval below).
        self.dim = dim
        super().__init__(p, q, input_var)

    @property
    def _symbol(self):
        # LaTeX-style symbol used when pretty-printing this loss,
        # e.g. D_KL[p(x)||q(x)].
        return sympy.Symbol("D_{{KL}} \\left[{}||{} \\right]".format(
            self.p.prob_text, self.q.prob_text))

    def _get_eval(self, x_dict: Dict[str, torch.Tensor],
                  **kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """Evaluate KL(p||q) given the conditioning values in ``x_dict``.

        Returns:
            Tuple[torch.Tensor, Dict[str, torch.Tensor]]: the aggregated
            divergence and the (unmodified) input dictionary.

        Raises:
            ValueError: if either distribution does not wrap a native PyTorch
                distribution (i.e. has no ``distribution_torch_class``), in
                which case the closed-form KL cannot be computed.
        """
        if (not hasattr(self.p, 'distribution_torch_class')) \
                or (not hasattr(self.q, 'distribution_torch_class')):
            raise ValueError("Divergence between these two distributions "
                             "cannot be evaluated, got %s and %s."
                             % (self.p.distribution_name, self.q.distribution_name))

        # Condition each distribution on its own subset of x_dict before
        # reading its underlying torch distribution (.dist).
        input_dict = get_dict_values(x_dict, self.p.input_var, True)
        self.p.set_dist(input_dict)
        input_dict = get_dict_values(x_dict, self.q.input_var, True)
        self.q.set_dist(input_dict)

        divergence = _kl_categorical_categorical(self.p.dist, self.q.dist)

        if self.dim is not None:
            # User asked for aggregation over one specific dimension only.
            divergence = torch.sum(divergence, dim=self.dim)
            return divergence, x_dict

        # Default: sum over all dimensions except the first
        # (presumably the batch dimension — TODO confirm against Loss contract).
        dim_list = list(torch.arange(divergence.dim()))
        divergence = torch.sum(divergence, dim=dim_list[1:])
        return divergence, x_dict
\"\"\" t = p.probs * (p.logits - q.logits) t[(q.probs == 0).expand_as(t)] =", "def __init__(self, p: Distribution, q: Distribution, input_var: Optional[List[str]] = None, dim: Optional[int] =", "Optional[int] = None): self.dim = dim super().__init__(p, q, input_var) @property def _symbol(self): return", "from pixyz.losses.losses import Loss from pixyz.utils import get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution", "sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text)) def _get_eval(self, x_dict: Dict[str, torch.Tensor], **kwargs) -> Tuple[torch.Tensor,", "x_dict: Dict[str, torch.Tensor], **kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if (not hasattr(self.p, 'distribution_torch_class')) \\", "q (torch.distributions.Distribution): PyTorch Distribution class. Returns: t (torch.Tensor): Calculated KL divergence. \"\"\" t", "\"\"\" def __init__(self, p: Distribution, q: Distribution, input_var: Optional[List[str]] = None, dim: Optional[int]", "inf from pixyz.distributions import Distribution from pixyz.losses.losses import Loss from pixyz.utils import get_dict_values", "(torch.distributions.Distribution): PyTorch Distribution class. Returns: t (torch.Tensor): Calculated KL divergence. 
\"\"\" t =", "super().__init__(p, q, input_var) @property def _symbol(self): return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text)) def", "divergence = _kl_categorical_categorical(self.p.dist, self.q.dist) if self.dim is not None: divergence = torch.sum(divergence, dim=self.dim)", "KL divergence in PyTorch https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence \"\"\" from typing import Optional, List, Dict, Tuple", "Loss from pixyz.utils import get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution ) -> torch.Tensor:", "Dict[str, torch.Tensor], **kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if (not hasattr(self.p, 'distribution_torch_class')) \\ or", "torch.distributions.Distribution ) -> torch.Tensor: \"\"\"KL divergence between categorical and categorical, KL(p||q). Args: p", "got %s and %s.\" % (self.p.distribution_name, self.q.distribution_name)) input_dict = get_dict_values(x_dict, self.p.input_var, True) self.p.set_dist(input_dict)", "import torch from torch._six import inf from pixyz.distributions import Distribution from pixyz.losses.losses import", "if (not hasattr(self.p, 'distribution_torch_class')) \\ or (not hasattr(self.q, 'distribution_torch_class')): raise ValueError(\"Divergence between these", "q, input_var) @property def _symbol(self): return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text)) def _get_eval(self,", "torch.Tensor], **kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if (not hasattr(self.p, 'distribution_torch_class')) \\ or (not", "or (not hasattr(self.q, 'distribution_torch_class')): raise ValueError(\"Divergence between these two distributions \" \"cannot be", "name. dim (int, optional): Aggregate dimension. 
\"\"\" def __init__(self, p: Distribution, q: Distribution,", "dim=self.dim) return divergence, x_dict dim_list = list(torch.arange(divergence.dim())) divergence = torch.sum(divergence, dim=dim_list[1:]) return divergence,", "these two distributions \" \"cannot be evaluated, got %s and %s.\" % (self.p.distribution_name,", "q: torch.distributions.Distribution ) -> torch.Tensor: \"\"\"KL divergence between categorical and categorical, KL(p||q). Args:", "input_dict = get_dict_values(x_dict, self.q.input_var, True) self.q.set_dist(input_dict) divergence = _kl_categorical_categorical(self.p.dist, self.q.dist) if self.dim is", "- q.logits) t[(q.probs == 0).expand_as(t)] = inf t[(p.probs == 0).expand_as(t)] = 0 return", "-> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if (not hasattr(self.p, 'distribution_torch_class')) \\ or (not hasattr(self.q, 'distribution_torch_class')):", "\\\\right]\".format( self.p.prob_text, self.q.prob_text)) def _get_eval(self, x_dict: Dict[str, torch.Tensor], **kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:", "import sympy import torch from torch._six import inf from pixyz.distributions import Distribution from", "(int, optional): Aggregate dimension. \"\"\" def __init__(self, p: Distribution, q: Distribution, input_var: Optional[List[str]]", "in PyTorch https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence \"\"\" from typing import Optional, List, Dict, Tuple import sympy", "self.q.distribution_name)) input_dict = get_dict_values(x_dict, self.p.input_var, True) self.p.set_dist(input_dict) input_dict = get_dict_values(x_dict, self.q.input_var, True) self.q.set_dist(input_dict)", "class. q (pixyz.distributions.distributions.Distribution): Distribution class. input_var (list, optional): Input variable name. 
dim (int,", "\"cannot be evaluated, got %s and %s.\" % (self.p.distribution_name, self.q.distribution_name)) input_dict = get_dict_values(x_dict,", "List, Dict, Tuple import sympy import torch from torch._six import inf from pixyz.distributions", "between categorical and categorical, KL(p||q). Args: p (torch.distributions.Distribution): PyTorch Distribution class. q (torch.distributions.Distribution):", "self.q.set_dist(input_dict) divergence = _kl_categorical_categorical(self.p.dist, self.q.dist) if self.dim is not None: divergence = torch.sum(divergence,", "_get_eval(self, x_dict: Dict[str, torch.Tensor], **kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if (not hasattr(self.p, 'distribution_torch_class'))", "(not hasattr(self.q, 'distribution_torch_class')): raise ValueError(\"Divergence between these two distributions \" \"cannot be evaluated,", "self.dim is not None: divergence = torch.sum(divergence, dim=self.dim) return divergence, x_dict dim_list =", "ref) KL divergence in PyTorch https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence \"\"\" from typing import Optional, List, Dict,", "import Loss from pixyz.utils import get_dict_values def _kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution ) ->", "return t.sum(-1) class CategoricalKullbackLeibler(Loss): \"\"\"Kullback Leibler divergence for categorical distributions. Args: p (pixyz.distributions.distributions.Distribution):", "Aggregate dimension. \"\"\" def __init__(self, p: Distribution, q: Distribution, input_var: Optional[List[str]] = None,", "KL(p||q). Args: p (torch.distributions.Distribution): PyTorch Distribution class. q (torch.distributions.Distribution): PyTorch Distribution class. 
Returns:", "self.p.prob_text, self.q.prob_text)) def _get_eval(self, x_dict: Dict[str, torch.Tensor], **kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if", "class CategoricalKullbackLeibler(Loss): \"\"\"Kullback Leibler divergence for categorical distributions. Args: p (pixyz.distributions.distributions.Distribution): Distribution class.", "class. q (torch.distributions.Distribution): PyTorch Distribution class. Returns: t (torch.Tensor): Calculated KL divergence. \"\"\"", "return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text)) def _get_eval(self, x_dict: Dict[str, torch.Tensor], **kwargs) ->", "https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence \"\"\" from typing import Optional, List, Dict, Tuple import sympy import torch", "class. Returns: t (torch.Tensor): Calculated KL divergence. \"\"\" t = p.probs * (p.logits", "(torch.Tensor): Calculated KL divergence. \"\"\" t = p.probs * (p.logits - q.logits) t[(q.probs", "None): self.dim = dim super().__init__(p, q, input_var) @property def _symbol(self): return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{}", "torch.Tensor: \"\"\"KL divergence between categorical and categorical, KL(p||q). 
Args: p (torch.distributions.Distribution): PyTorch Distribution", "q.logits) t[(q.probs == 0).expand_as(t)] = inf t[(p.probs == 0).expand_as(t)] = 0 return t.sum(-1)", "is not None: divergence = torch.sum(divergence, dim=self.dim) return divergence, x_dict dim_list = list(torch.arange(divergence.dim()))", "_symbol(self): return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text)) def _get_eval(self, x_dict: Dict[str, torch.Tensor], **kwargs)", "(not hasattr(self.p, 'distribution_torch_class')) \\ or (not hasattr(self.q, 'distribution_torch_class')): raise ValueError(\"Divergence between these two", "\\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text)) def _get_eval(self, x_dict: Dict[str, torch.Tensor], **kwargs) -> Tuple[torch.Tensor, Dict[str,", "hasattr(self.q, 'distribution_torch_class')): raise ValueError(\"Divergence between these two distributions \" \"cannot be evaluated, got", "0 return t.sum(-1) class CategoricalKullbackLeibler(Loss): \"\"\"Kullback Leibler divergence for categorical distributions. Args: p", "None: divergence = torch.sum(divergence, dim=self.dim) return divergence, x_dict dim_list = list(torch.arange(divergence.dim())) divergence =", "Distribution class. Returns: t (torch.Tensor): Calculated KL divergence. \"\"\" t = p.probs *", "def _symbol(self): return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text)) def _get_eval(self, x_dict: Dict[str, torch.Tensor],", "%s.\" % (self.p.distribution_name, self.q.distribution_name)) input_dict = get_dict_values(x_dict, self.p.input_var, True) self.p.set_dist(input_dict) input_dict = get_dict_values(x_dict,", "None, dim: Optional[int] = None): self.dim = dim super().__init__(p, q, input_var) @property def", "class. input_var (list, optional): Input variable name. dim (int, optional): Aggregate dimension. 
\"\"\"", "and %s.\" % (self.p.distribution_name, self.q.distribution_name)) input_dict = get_dict_values(x_dict, self.p.input_var, True) self.p.set_dist(input_dict) input_dict =", "divergence for categorical distributions. Args: p (pixyz.distributions.distributions.Distribution): Distribution class. q (pixyz.distributions.distributions.Distribution): Distribution class.", "Distribution class. q (torch.distributions.Distribution): PyTorch Distribution class. Returns: t (torch.Tensor): Calculated KL divergence.", "True) self.q.set_dist(input_dict) divergence = _kl_categorical_categorical(self.p.dist, self.q.dist) if self.dim is not None: divergence =", "\"\"\" from typing import Optional, List, Dict, Tuple import sympy import torch from", "input_var) @property def _symbol(self): return sympy.Symbol(\"D_{{KL}} \\\\left[{}||{} \\\\right]\".format( self.p.prob_text, self.q.prob_text)) def _get_eval(self, x_dict:", "categorical distributions. Args: p (pixyz.distributions.distributions.Distribution): Distribution class. q (pixyz.distributions.distributions.Distribution): Distribution class. input_var (list,", "__init__(self, p: Distribution, q: Distribution, input_var: Optional[List[str]] = None, dim: Optional[int] = None):", "KL divergence KL loss for Categorical and RelaxedCategorical ref) KL divergence in PyTorch", "Dict, Tuple import sympy import torch from torch._six import inf from pixyz.distributions import", "Tuple[torch.Tensor, Dict[str, torch.Tensor]]: if (not hasattr(self.p, 'distribution_torch_class')) \\ or (not hasattr(self.q, 'distribution_torch_class')): raise", "self.q.input_var, True) self.q.set_dist(input_dict) divergence = _kl_categorical_categorical(self.p.dist, self.q.dist) if self.dim is not None: divergence", "t (torch.Tensor): Calculated KL divergence. \"\"\" t = p.probs * (p.logits - q.logits)", "Leibler divergence for categorical distributions. Args: p (pixyz.distributions.distributions.Distribution): Distribution class. 
q (pixyz.distributions.distributions.Distribution): Distribution", "optional): Input variable name. dim (int, optional): Aggregate dimension. \"\"\" def __init__(self, p:", "PyTorch https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence \"\"\" from typing import Optional, List, Dict, Tuple import sympy import", "def _kl_categorical_categorical(p: torch.distributions.Distribution, q: torch.distributions.Distribution ) -> torch.Tensor: \"\"\"KL divergence between categorical and", "if self.dim is not None: divergence = torch.sum(divergence, dim=self.dim) return divergence, x_dict dim_list", "divergence between categorical and categorical, KL(p||q). Args: p (torch.distributions.Distribution): PyTorch Distribution class. q", "t.sum(-1) class CategoricalKullbackLeibler(Loss): \"\"\"Kullback Leibler divergence for categorical distributions. Args: p (pixyz.distributions.distributions.Distribution): Distribution" ]
[ "a string while len(ints) > n: next_sequence = ints[:n] zero = next_sequence.find('0') if", "a, b: a*int(b), next_sequence, 1) ints = ints[1:] def main(): n = int(sys.argv[1])", "main(): n = int(sys.argv[1]) ints = ''.join(open('pe8.txt').read().split('\\n')) print max(yield_next_product(ints, n)) if __name__ ==", "1:] else: yield reduce(lambda a, b: a*int(b), next_sequence, 1) ints = ints[1:] def", "is a string while len(ints) > n: next_sequence = ints[:n] zero = next_sequence.find('0')", "zero > -1: ints = ints[zero + 1:] else: yield reduce(lambda a, b:", "string while len(ints) > n: next_sequence = ints[:n] zero = next_sequence.find('0') if zero", "import sys def yield_next_product(ints, n): # ints is a string while len(ints) >", "> -1: ints = ints[zero + 1:] else: yield reduce(lambda a, b: a*int(b),", "if zero > -1: ints = ints[zero + 1:] else: yield reduce(lambda a,", "# ints is a string while len(ints) > n: next_sequence = ints[:n] zero", "ints[:n] zero = next_sequence.find('0') if zero > -1: ints = ints[zero + 1:]", "next_sequence.find('0') if zero > -1: ints = ints[zero + 1:] else: yield reduce(lambda", "def main(): n = int(sys.argv[1]) ints = ''.join(open('pe8.txt').read().split('\\n')) print max(yield_next_product(ints, n)) if __name__", "else: yield reduce(lambda a, b: a*int(b), next_sequence, 1) ints = ints[1:] def main():", "ints[1:] def main(): n = int(sys.argv[1]) ints = ''.join(open('pe8.txt').read().split('\\n')) print max(yield_next_product(ints, n)) if", "while len(ints) > n: next_sequence = ints[:n] zero = next_sequence.find('0') if zero >", "ints[zero + 1:] else: yield reduce(lambda a, b: a*int(b), next_sequence, 1) ints =", "n): # ints is a string while len(ints) > n: next_sequence = ints[:n]", "len(ints) > n: next_sequence = ints[:n] zero = next_sequence.find('0') if zero > -1:", "ints is a string while len(ints) > n: next_sequence = ints[:n] zero =", "-1: ints = ints[zero + 1:] else: yield reduce(lambda a, b: a*int(b), next_sequence,", 
"a*int(b), next_sequence, 1) ints = ints[1:] def main(): n = int(sys.argv[1]) ints =", "yield_next_product(ints, n): # ints is a string while len(ints) > n: next_sequence =", "next_sequence, 1) ints = ints[1:] def main(): n = int(sys.argv[1]) ints = ''.join(open('pe8.txt').read().split('\\n'))", "def yield_next_product(ints, n): # ints is a string while len(ints) > n: next_sequence", "reduce(lambda a, b: a*int(b), next_sequence, 1) ints = ints[1:] def main(): n =", "ints = ints[zero + 1:] else: yield reduce(lambda a, b: a*int(b), next_sequence, 1)", "= ints[zero + 1:] else: yield reduce(lambda a, b: a*int(b), next_sequence, 1) ints", "b: a*int(b), next_sequence, 1) ints = ints[1:] def main(): n = int(sys.argv[1]) ints", "= ints[1:] def main(): n = int(sys.argv[1]) ints = ''.join(open('pe8.txt').read().split('\\n')) print max(yield_next_product(ints, n))", "1) ints = ints[1:] def main(): n = int(sys.argv[1]) ints = ''.join(open('pe8.txt').read().split('\\n')) print", "+ 1:] else: yield reduce(lambda a, b: a*int(b), next_sequence, 1) ints = ints[1:]", "sys def yield_next_product(ints, n): # ints is a string while len(ints) > n:", "= next_sequence.find('0') if zero > -1: ints = ints[zero + 1:] else: yield", "yield reduce(lambda a, b: a*int(b), next_sequence, 1) ints = ints[1:] def main(): n", "next_sequence = ints[:n] zero = next_sequence.find('0') if zero > -1: ints = ints[zero", "= int(sys.argv[1]) ints = ''.join(open('pe8.txt').read().split('\\n')) print max(yield_next_product(ints, n)) if __name__ == '__main__': main()", "zero = next_sequence.find('0') if zero > -1: ints = ints[zero + 1:] else:", "n = int(sys.argv[1]) ints = ''.join(open('pe8.txt').read().split('\\n')) print max(yield_next_product(ints, n)) if __name__ == '__main__':", "> n: next_sequence = ints[:n] zero = next_sequence.find('0') if zero > -1: ints", "ints = ints[1:] def main(): n = int(sys.argv[1]) ints = ''.join(open('pe8.txt').read().split('\\n')) print max(yield_next_product(ints,", "n: 
next_sequence = ints[:n] zero = next_sequence.find('0') if zero > -1: ints =", "= ints[:n] zero = next_sequence.find('0') if zero > -1: ints = ints[zero +" ]
[ "port') p.add_argument('-d', '--debug', action='store_true') return p.parse_args() def main(): setup_paths() setup_gevent() args = parse_args()", "'..')) py_version = '%d.%d' % (sys.version_info[0], sys.version_info[1]) # Setup virtual environment venv_path =", "site.addsitedir(venv_path) # Setup python path sys.path.append(base_dir) # Reorder sys path new_sys_path = [p", "from gevent.wsgi import WSGIServer from django.core.handlers.wsgi import WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever() def parse_args():", "= os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version) print >> sys.stderr, 'virtual env path:', venv_path site.addsitedir(venv_path)", "\"\"\" Django gevent server for axes research \"\"\" import os, sys def setup_paths():", "axes research \"\"\" import os, sys def setup_paths(): import site old_sys_path = list(sys.path)", "port), WSGIHandler()).serve_forever() def parse_args(): import argparse p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int, default=8088,", "setup_gevent() args = parse_args() setup_app(args.debug) servertype = 'debug' if args.debug else 'production' print", "server for axes research \"\"\" import os, sys def setup_paths(): import site old_sys_path", "# Setup python path sys.path.append(base_dir) # Reorder sys path new_sys_path = [p for", "sys.path.append(base_dir) # Reorder sys path new_sys_path = [p for p in sys.path if", "p.add_argument('-p', '--port', type=int, default=8088, help='TCP port') p.add_argument('-d', '--debug', action='store_true') return p.parse_args() def main():", "sys def setup_paths(): import site old_sys_path = list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version", "'virtual env path:', venv_path site.addsitedir(venv_path) # Setup python path sys.path.append(base_dir) # Reorder sys", "action='store_true') return p.parse_args() def main(): setup_paths() setup_gevent() args = 
parse_args() setup_app(args.debug) servertype =", "args.debug else 'production' print 'Starting', servertype, 'server on port', args.port start_server(args.port) if __name__", "setup_app(args.debug) servertype = 'debug' if args.debug else 'production' print 'Starting', servertype, 'server on", "for axes research \"\"\" import os, sys def setup_paths(): import site old_sys_path =", "= parse_args() setup_app(args.debug) servertype = 'debug' if args.debug else 'production' print 'Starting', servertype,", "p.add_argument('-d', '--debug', action='store_true') return p.parse_args() def main(): setup_paths() setup_gevent() args = parse_args() setup_app(args.debug)", "new_sys_path = [p for p in sys.path if p not in old_sys_path] for", "python path sys.path.append(base_dir) # Reorder sys path new_sys_path = [p for p in", "= 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def start_server(port, host=''): from gevent.wsgi import WSGIServer", "% (sys.version_info[0], sys.version_info[1]) # Setup virtual environment venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version)", "gevent import monkey monkey.patch_all() def setup_app(debug): if debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE']", "monkey.patch_all() def setup_app(debug): if debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def", "new_sys_path: sys.path.remove(item) sys.path[:0] = new_sys_path def setup_gevent(): from gevent import monkey monkey.patch_all() def", "main(): setup_paths() setup_gevent() args = parse_args() setup_app(args.debug) servertype = 'debug' if args.debug else", "'venv/lib/python%s/site-packages' % py_version) print >> sys.stderr, 'virtual env path:', venv_path site.addsitedir(venv_path) # Setup", "for item in new_sys_path: sys.path.remove(item) sys.path[:0] 
= new_sys_path def setup_gevent(): from gevent import", "p in sys.path if p not in old_sys_path] for item in new_sys_path: sys.path.remove(item)", "django.core.handlers.wsgi import WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever() def parse_args(): import argparse p = argparse.ArgumentParser(description=__doc__.strip())", "py_version) print >> sys.stderr, 'virtual env path:', venv_path site.addsitedir(venv_path) # Setup python path", "if p not in old_sys_path] for item in new_sys_path: sys.path.remove(item) sys.path[:0] = new_sys_path", "debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def start_server(port, host=''): from gevent.wsgi", "python \"\"\" Django gevent server for axes research \"\"\" import os, sys def", "not in old_sys_path] for item in new_sys_path: sys.path.remove(item) sys.path[:0] = new_sys_path def setup_gevent():", "default=8088, help='TCP port') p.add_argument('-d', '--debug', action='store_true') return p.parse_args() def main(): setup_paths() setup_gevent() args", "WSGIServer from django.core.handlers.wsgi import WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever() def parse_args(): import argparse p", "# Reorder sys path new_sys_path = [p for p in sys.path if p", "WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever() def parse_args(): import argparse p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port',", "'debug' if args.debug else 'production' print 'Starting', servertype, 'server on port', args.port start_server(args.port)", "os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version = '%d.%d' % (sys.version_info[0], sys.version_info[1]) # Setup virtual environment venv_path", "servertype = 'debug' if args.debug else 'production' print 'Starting', servertype, 'server on port',", "gevent.wsgi import WSGIServer from django.core.handlers.wsgi 
import WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever() def parse_args(): import", "Setup virtual environment venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version) print >> sys.stderr, 'virtual", "def start_server(port, host=''): from gevent.wsgi import WSGIServer from django.core.handlers.wsgi import WSGIHandler WSGIServer((host, port),", "else 'production' print 'Starting', servertype, 'server on port', args.port start_server(args.port) if __name__ ==", "for p in sys.path if p not in old_sys_path] for item in new_sys_path:", "argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int, default=8088, help='TCP port') p.add_argument('-d', '--debug', action='store_true') return p.parse_args() def", "venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version) print >> sys.stderr, 'virtual env path:', venv_path", "Reorder sys path new_sys_path = [p for p in sys.path if p not", "'production' print 'Starting', servertype, 'server on port', args.port start_server(args.port) if __name__ == '__main__':", "sys.path[:0] = new_sys_path def setup_gevent(): from gevent import monkey monkey.patch_all() def setup_app(debug): if", "import WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever() def parse_args(): import argparse p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p',", "item in new_sys_path: sys.path.remove(item) sys.path[:0] = new_sys_path def setup_gevent(): from gevent import monkey", "gevent server for axes research \"\"\" import os, sys def setup_paths(): import site", "setup_gevent(): from gevent import monkey monkey.patch_all() def setup_app(debug): if debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings'", "\"\"\" import os, sys def setup_paths(): import site old_sys_path = list(sys.path) base_dir =", "type=int, default=8088, help='TCP port') p.add_argument('-d', '--debug', action='store_true') return 
p.parse_args() def main(): setup_paths() setup_gevent()", "sys path new_sys_path = [p for p in sys.path if p not in", "def setup_gevent(): from gevent import monkey monkey.patch_all() def setup_app(debug): if debug: os.environ['DJANGO_SETTINGS_MODULE'] =", "'--debug', action='store_true') return p.parse_args() def main(): setup_paths() setup_gevent() args = parse_args() setup_app(args.debug) servertype", "virtual environment venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version) print >> sys.stderr, 'virtual env", "if args.debug else 'production' print 'Starting', servertype, 'server on port', args.port start_server(args.port) if", "os, sys def setup_paths(): import site old_sys_path = list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))", "# Setup virtual environment venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version) print >> sys.stderr,", "WSGIHandler()).serve_forever() def parse_args(): import argparse p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int, default=8088, help='TCP", "<reponame>kevinmcguinness/axes-research #!/usr/bin/env python \"\"\" Django gevent server for axes research \"\"\" import os,", "path:', venv_path site.addsitedir(venv_path) # Setup python path sys.path.append(base_dir) # Reorder sys path new_sys_path", "= list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version = '%d.%d' % (sys.version_info[0], sys.version_info[1]) #", "site old_sys_path = list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version = '%d.%d' % (sys.version_info[0],", "py_version = '%d.%d' % (sys.version_info[0], sys.version_info[1]) # Setup virtual environment venv_path = os.path.join(base_dir,", "'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def start_server(port, host=''): from gevent.wsgi 
import WSGIServer from", "'%d.%d' % (sys.version_info[0], sys.version_info[1]) # Setup virtual environment venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages' %", "monkey monkey.patch_all() def setup_app(debug): if debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production'", "argparse p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int, default=8088, help='TCP port') p.add_argument('-d', '--debug', action='store_true')", "if debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def start_server(port, host=''): from", "path sys.path.append(base_dir) # Reorder sys path new_sys_path = [p for p in sys.path", "= argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int, default=8088, help='TCP port') p.add_argument('-d', '--debug', action='store_true') return p.parse_args()", "Setup python path sys.path.append(base_dir) # Reorder sys path new_sys_path = [p for p", "WSGIServer((host, port), WSGIHandler()).serve_forever() def parse_args(): import argparse p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int,", "venv_path site.addsitedir(venv_path) # Setup python path sys.path.append(base_dir) # Reorder sys path new_sys_path =", "p.parse_args() def main(): setup_paths() setup_gevent() args = parse_args() setup_app(args.debug) servertype = 'debug' if", "from django.core.handlers.wsgi import WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever() def parse_args(): import argparse p =", "'--port', type=int, default=8088, help='TCP port') p.add_argument('-d', '--debug', action='store_true') return p.parse_args() def main(): setup_paths()", "new_sys_path def setup_gevent(): from gevent import monkey monkey.patch_all() def setup_app(debug): if 
debug: os.environ['DJANGO_SETTINGS_MODULE']", "print >> sys.stderr, 'virtual env path:', venv_path site.addsitedir(venv_path) # Setup python path sys.path.append(base_dir)", "environment venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version) print >> sys.stderr, 'virtual env path:',", "in new_sys_path: sys.path.remove(item) sys.path[:0] = new_sys_path def setup_gevent(): from gevent import monkey monkey.patch_all()", "args = parse_args() setup_app(args.debug) servertype = 'debug' if args.debug else 'production' print 'Starting',", "os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def start_server(port, host=''): from gevent.wsgi import", "from gevent import monkey monkey.patch_all() def setup_app(debug): if debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else:", "def setup_app(debug): if debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def start_server(port,", ">> sys.stderr, 'virtual env path:', venv_path site.addsitedir(venv_path) # Setup python path sys.path.append(base_dir) #", "parse_args(): import argparse p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int, default=8088, help='TCP port') p.add_argument('-d',", "def setup_paths(): import site old_sys_path = list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version =", "import WSGIServer from django.core.handlers.wsgi import WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever() def parse_args(): import argparse", "def main(): setup_paths() setup_gevent() args = parse_args() setup_app(args.debug) servertype = 'debug' if args.debug", "setup_paths() setup_gevent() args = parse_args() setup_app(args.debug) servertype = 'debug' if args.debug else 'production'", "help='TCP 
port') p.add_argument('-d', '--debug', action='store_true') return p.parse_args() def main(): setup_paths() setup_gevent() args =", "import argparse p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int, default=8088, help='TCP port') p.add_argument('-d', '--debug',", "'axesresearch.settings.production' def start_server(port, host=''): from gevent.wsgi import WSGIServer from django.core.handlers.wsgi import WSGIHandler WSGIServer((host,", "print 'Starting', servertype, 'server on port', args.port start_server(args.port) if __name__ == '__main__': main()", "env path:', venv_path site.addsitedir(venv_path) # Setup python path sys.path.append(base_dir) # Reorder sys path", "= new_sys_path def setup_gevent(): from gevent import monkey monkey.patch_all() def setup_app(debug): if debug:", "(sys.version_info[0], sys.version_info[1]) # Setup virtual environment venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version) print", "import site old_sys_path = list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version = '%d.%d' %", "list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version = '%d.%d' % (sys.version_info[0], sys.version_info[1]) # Setup", "sys.path.remove(item) sys.path[:0] = new_sys_path def setup_gevent(): from gevent import monkey monkey.patch_all() def setup_app(debug):", "os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def start_server(port, host=''): from gevent.wsgi import WSGIServer from django.core.handlers.wsgi import", "#!/usr/bin/env python \"\"\" Django gevent server for axes research \"\"\" import os, sys", "sys.version_info[1]) # Setup virtual environment venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version) print >>", "[p for p in sys.path if p not in old_sys_path] for item in", "research \"\"\" import os, sys def setup_paths(): import site 
old_sys_path = list(sys.path) base_dir", "Django gevent server for axes research \"\"\" import os, sys def setup_paths(): import", "sys.stderr, 'virtual env path:', venv_path site.addsitedir(venv_path) # Setup python path sys.path.append(base_dir) # Reorder", "os.path.join(base_dir, 'venv/lib/python%s/site-packages' % py_version) print >> sys.stderr, 'virtual env path:', venv_path site.addsitedir(venv_path) #", "% py_version) print >> sys.stderr, 'virtual env path:', venv_path site.addsitedir(venv_path) # Setup python", "else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def start_server(port, host=''): from gevent.wsgi import WSGIServer from django.core.handlers.wsgi", "parse_args() setup_app(args.debug) servertype = 'debug' if args.debug else 'production' print 'Starting', servertype, 'server", "base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version = '%d.%d' % (sys.version_info[0], sys.version_info[1]) # Setup virtual", "in sys.path if p not in old_sys_path] for item in new_sys_path: sys.path.remove(item) sys.path[:0]", "start_server(port, host=''): from gevent.wsgi import WSGIServer from django.core.handlers.wsgi import WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever()", "old_sys_path = list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version = '%d.%d' % (sys.version_info[0], sys.version_info[1])", "= 'debug' if args.debug else 'production' print 'Starting', servertype, 'server on port', args.port", "p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int, default=8088, help='TCP port') p.add_argument('-d', '--debug', action='store_true') return", "path new_sys_path = [p for p in sys.path if p not in old_sys_path]", "= '%d.%d' % (sys.version_info[0], sys.version_info[1]) # Setup virtual environment venv_path = os.path.join(base_dir, 'venv/lib/python%s/site-packages'", "def parse_args(): import argparse 
p = argparse.ArgumentParser(description=__doc__.strip()) p.add_argument('-p', '--port', type=int, default=8088, help='TCP port')", "import monkey monkey.patch_all() def setup_app(debug): if debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] =", "= 'axesresearch.settings.production' def start_server(port, host=''): from gevent.wsgi import WSGIServer from django.core.handlers.wsgi import WSGIHandler", "in old_sys_path] for item in new_sys_path: sys.path.remove(item) sys.path[:0] = new_sys_path def setup_gevent(): from", "old_sys_path] for item in new_sys_path: sys.path.remove(item) sys.path[:0] = new_sys_path def setup_gevent(): from gevent", "p not in old_sys_path] for item in new_sys_path: sys.path.remove(item) sys.path[:0] = new_sys_path def", "setup_app(debug): if debug: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings' else: os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production' def start_server(port, host=''):", "return p.parse_args() def main(): setup_paths() setup_gevent() args = parse_args() setup_app(args.debug) servertype = 'debug'", "= os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version = '%d.%d' % (sys.version_info[0], sys.version_info[1]) # Setup virtual environment", "host=''): from gevent.wsgi import WSGIServer from django.core.handlers.wsgi import WSGIHandler WSGIServer((host, port), WSGIHandler()).serve_forever() def", "= [p for p in sys.path if p not in old_sys_path] for item", "import os, sys def setup_paths(): import site old_sys_path = list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),", "sys.path if p not in old_sys_path] for item in new_sys_path: sys.path.remove(item) sys.path[:0] =", "setup_paths(): import site old_sys_path = list(sys.path) base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) py_version = '%d.%d'" ]