ngram (listlengths: 0 to 67.8k)
[ "right: mid = (left + right) // 2 total = 0 for cap", "ans = 0, sum(batteries) // n, 0 while left <= right: mid =", "class Solution: def maxRunTime(self, n: int, batteries: List[int]) -> int: left, right, ans", "n, 0 while left <= right: mid = (left + right) // 2", "total += min(cap, mid) if total >= n * mid: ans = mid", "0 while left <= right: mid = (left + right) // 2 total", "sum(batteries) // n, 0 while left <= right: mid = (left + right)", "2 total = 0 for cap in batteries: total += min(cap, mid) if", "ans = mid left = mid + 1 else: right = mid -1", "= (left + right) // 2 total = 0 for cap in batteries:", "int, batteries: List[int]) -> int: left, right, ans = 0, sum(batteries) // n,", "mid: ans = mid left = mid + 1 else: right = mid", "maxRunTime(self, n: int, batteries: List[int]) -> int: left, right, ans = 0, sum(batteries)", "right, ans = 0, sum(batteries) // n, 0 while left <= right: mid", "+ right) // 2 total = 0 for cap in batteries: total +=", "= 0, sum(batteries) // n, 0 while left <= right: mid = (left", "0, sum(batteries) // n, 0 while left <= right: mid = (left +", "batteries: total += min(cap, mid) if total >= n * mid: ans =", "if total >= n * mid: ans = mid left = mid +", "cap in batteries: total += min(cap, mid) if total >= n * mid:", ">= n * mid: ans = mid left = mid + 1 else:", "-> int: left, right, ans = 0, sum(batteries) // n, 0 while left", "while left <= right: mid = (left + right) // 2 total =", "Solution: def maxRunTime(self, n: int, batteries: List[int]) -> int: left, right, ans =", "(left + right) // 2 total = 0 for cap in batteries: total", "for cap in batteries: total += min(cap, mid) if total >= n *", "+= min(cap, mid) if total >= n * mid: ans = mid left", "min(cap, mid) if total >= n * mid: ans = mid left =", "total = 0 for cap in batteries: total += min(cap, mid) if total", "in batteries: total += min(cap, mid) if total >= n * mid: ans", "n * mid: ans = mid left = mid + 1 else: right", "= mid left = mid + 1 else: right = mid -1 return", "def maxRunTime(self, n: int, batteries: List[int]) -> int: left, right, ans = 0,", "left, right, ans = 0, sum(batteries) // n, 0 while left <= right:", "right) // 2 total = 0 for cap in batteries: total += min(cap,", "mid) if total >= n * mid: ans = mid left = mid", "0 for cap in batteries: total += min(cap, mid) if total >= n", "mid left = mid + 1 else: right = mid -1 return ans", "total >= n * mid: ans = mid left = mid + 1", "List[int]) -> int: left, right, ans = 0, sum(batteries) // n, 0 while", "n: int, batteries: List[int]) -> int: left, right, ans = 0, sum(batteries) //", "// 2 total = 0 for cap in batteries: total += min(cap, mid)", "int: left, right, ans = 0, sum(batteries) // n, 0 while left <=", "mid = (left + right) // 2 total = 0 for cap in", "// n, 0 while left <= right: mid = (left + right) //", "batteries: List[int]) -> int: left, right, ans = 0, sum(batteries) // n, 0", "= 0 for cap in batteries: total += min(cap, mid) if total >=", "<= right: mid = (left + right) // 2 total = 0 for", "left <= right: mid = (left + right) // 2 total = 0", "* mid: ans = mid left = mid + 1 else: right =" ]
[ "cellPos[0] <= screenBottom: nextCell = (cellPos[0] + 1, cellPos[1]) if nextCell not in", "in self.dictionary: self.dictionary[nextCell] = (randint(0,4), 255) # Deleting cells if cellOpacity < tailSize:", "= self.dictionary[cellPos][1] #Update Cell opacity = cellOpacity - tailSize if cellOpacity >= tailSize", "10 keys = self.dictionary.keys() for cellPos in list(reversed(keys)): cellIMG = self.dictionary[cellPos][0] cellOpacity =", "<= 1 else randint(0,4) self.dictionary[cellPos] = (cellIMG, opacity) # Add white to next", "if cellPos[0] <= screenBottom: nextCell = (cellPos[0] + 1, cellPos[1]) if nextCell not", "column = random.randint(screenLeft, screenRight) self.dictionary[screenTop, column] = (1, 255) def update(self, screenBottom): tailSize", "self.dictionary[screenTop, column] = (1, 255) def update(self, screenBottom): tailSize = 10 keys =", "white to next bottom if cellPos[0] <= screenBottom: nextCell = (cellPos[0] + 1,", "if cellOpacity >= tailSize else 0 cellIMG = cellIMG if randint(0, (opacity//20)**2) <=", "(cellIMG, opacity) # Add white to next bottom if cellPos[0] <= screenBottom: nextCell", "cellOpacity = self.dictionary[cellPos][1] #Update Cell opacity = cellOpacity - tailSize if cellOpacity >=", "- 1 screenLeft = -width // 2 screenRight = width // 2 column", "if randint(0, (opacity//20)**2) <= 1 else randint(0,4) self.dictionary[cellPos] = (cellIMG, opacity) # Add", "tailSize else 0 cellIMG = cellIMG if randint(0, (opacity//20)**2) <= 1 else randint(0,4)", "= (cellIMG, opacity) # Add white to next bottom if cellPos[0] <= screenBottom:", "top): screenTop = top - 1 screenLeft = -width // 2 screenRight =", "= (randint(0,4), 255) # Deleting cells if cellOpacity < tailSize: #del temp[cellPos] self.dictionary.pop(cellPos)", ">= tailSize else 0 cellIMG = cellIMG if randint(0, (opacity//20)**2) <= 1 else", "opacity = cellOpacity - tailSize if cellOpacity >= tailSize else 0 cellIMG =", "def addDrop(self, width, top): screenTop = top - 1 screenLeft = -width //", "#Update Cell opacity = cellOpacity - tailSize if cellOpacity >= tailSize else 0", "// 2 screenRight = width // 2 column = random.randint(screenLeft, screenRight) self.dictionary[screenTop, column]", "keys = self.dictionary.keys() for cellPos in list(reversed(keys)): cellIMG = self.dictionary[cellPos][0] cellOpacity = self.dictionary[cellPos][1]", "= cellIMG if randint(0, (opacity//20)**2) <= 1 else randint(0,4) self.dictionary[cellPos] = (cellIMG, opacity)", "randint(0, (opacity//20)**2) <= 1 else randint(0,4) self.dictionary[cellPos] = (cellIMG, opacity) # Add white", "cellOpacity - tailSize if cellOpacity >= tailSize else 0 cellIMG = cellIMG if", "# Add white to next bottom if cellPos[0] <= screenBottom: nextCell = (cellPos[0]", "#{(row, col): (pieceIMG, brightness)} def addDrop(self, width, top): screenTop = top - 1", "1, cellPos[1]) if nextCell not in self.dictionary: self.dictionary[nextCell] = (randint(0,4), 255) # Deleting", "= (1, 255) def update(self, screenBottom): tailSize = 10 keys = self.dictionary.keys() for", "Cell opacity = cellOpacity - tailSize if cellOpacity >= tailSize else 0 cellIMG", "import randint class State(): def __init__(self): self.dictionary = {} #{(row, col): (pieceIMG, brightness)}", "{} #{(row, col): (pieceIMG, brightness)} def addDrop(self, width, top): screenTop = top -", "screenLeft = -width // 2 screenRight = width // 2 column = random.randint(screenLeft,", "addDrop(self, width, top): screenTop = top - 1 screenLeft = -width // 2", "self.dictionary[cellPos][0] cellOpacity = 
self.dictionary[cellPos][1] #Update Cell opacity = cellOpacity - tailSize if cellOpacity", "self.dictionary[nextCell] = (randint(0,4), 255) # Deleting cells if cellOpacity < tailSize: #del temp[cellPos]", "bottom if cellPos[0] <= screenBottom: nextCell = (cellPos[0] + 1, cellPos[1]) if nextCell", "class State(): def __init__(self): self.dictionary = {} #{(row, col): (pieceIMG, brightness)} def addDrop(self,", "def update(self, screenBottom): tailSize = 10 keys = self.dictionary.keys() for cellPos in list(reversed(keys)):", "2 screenRight = width // 2 column = random.randint(screenLeft, screenRight) self.dictionary[screenTop, column] =", "cellOpacity >= tailSize else 0 cellIMG = cellIMG if randint(0, (opacity//20)**2) <= 1", "nextCell not in self.dictionary: self.dictionary[nextCell] = (randint(0,4), 255) # Deleting cells if cellOpacity", "column] = (1, 255) def update(self, screenBottom): tailSize = 10 keys = self.dictionary.keys()", "cellIMG = self.dictionary[cellPos][0] cellOpacity = self.dictionary[cellPos][1] #Update Cell opacity = cellOpacity - tailSize", "not in self.dictionary: self.dictionary[nextCell] = (randint(0,4), 255) # Deleting cells if cellOpacity <", "brightness)} def addDrop(self, width, top): screenTop = top - 1 screenLeft = -width", "0 cellIMG = cellIMG if randint(0, (opacity//20)**2) <= 1 else randint(0,4) self.dictionary[cellPos] =", "random from random import randint class State(): def __init__(self): self.dictionary = {} #{(row,", "opacity) # Add white to next bottom if cellPos[0] <= screenBottom: nextCell =", "(opacity//20)**2) <= 1 else randint(0,4) self.dictionary[cellPos] = (cellIMG, opacity) # Add white to", "= (cellPos[0] + 1, cellPos[1]) if nextCell not in self.dictionary: self.dictionary[nextCell] = (randint(0,4),", "screenRight) self.dictionary[screenTop, column] = (1, 255) def update(self, screenBottom): tailSize = 10 keys", "<= screenBottom: nextCell = (cellPos[0] + 1, cellPos[1]) if nextCell not in self.dictionary:", "for cellPos in list(reversed(keys)): cellIMG = self.dictionary[cellPos][0] cellOpacity = self.dictionary[cellPos][1] #Update Cell opacity", "self.dictionary.keys() for cellPos in list(reversed(keys)): cellIMG = self.dictionary[cellPos][0] cellOpacity = self.dictionary[cellPos][1] #Update Cell", "tailSize if cellOpacity >= tailSize else 0 cellIMG = cellIMG if randint(0, (opacity//20)**2)", "screenRight = width // 2 column = random.randint(screenLeft, screenRight) self.dictionary[screenTop, column] = (1,", "width // 2 column = random.randint(screenLeft, screenRight) self.dictionary[screenTop, column] = (1, 255) def", "= self.dictionary.keys() for cellPos in list(reversed(keys)): cellIMG = self.dictionary[cellPos][0] cellOpacity = self.dictionary[cellPos][1] #Update", "screenBottom: nextCell = (cellPos[0] + 1, cellPos[1]) if nextCell not in self.dictionary: self.dictionary[nextCell]", "import random from random import randint class State(): def __init__(self): self.dictionary = {}", "next bottom if cellPos[0] <= screenBottom: nextCell = (cellPos[0] + 1, cellPos[1]) if", "= self.dictionary[cellPos][0] cellOpacity = self.dictionary[cellPos][1] #Update Cell opacity = cellOpacity - tailSize if", "def __init__(self): self.dictionary = {} #{(row, col): (pieceIMG, brightness)} def addDrop(self, width, top):", "= -width // 2 screenRight = width // 2 column = random.randint(screenLeft, screenRight)", "= 10 keys = self.dictionary.keys() for cellPos in list(reversed(keys)): cellIMG = self.dictionary[cellPos][0] cellOpacity", "self.dictionary = {} #{(row, 
col): (pieceIMG, brightness)} def addDrop(self, width, top): screenTop =", "255) def update(self, screenBottom): tailSize = 10 keys = self.dictionary.keys() for cellPos in", "-width // 2 screenRight = width // 2 column = random.randint(screenLeft, screenRight) self.dictionary[screenTop,", "screenTop = top - 1 screenLeft = -width // 2 screenRight = width", "if nextCell not in self.dictionary: self.dictionary[nextCell] = (randint(0,4), 255) # Deleting cells if", "= cellOpacity - tailSize if cellOpacity >= tailSize else 0 cellIMG = cellIMG", "(1, 255) def update(self, screenBottom): tailSize = 10 keys = self.dictionary.keys() for cellPos", "tailSize = 10 keys = self.dictionary.keys() for cellPos in list(reversed(keys)): cellIMG = self.dictionary[cellPos][0]", "self.dictionary[cellPos] = (cellIMG, opacity) # Add white to next bottom if cellPos[0] <=", "(pieceIMG, brightness)} def addDrop(self, width, top): screenTop = top - 1 screenLeft =", "__init__(self): self.dictionary = {} #{(row, col): (pieceIMG, brightness)} def addDrop(self, width, top): screenTop", "self.dictionary[cellPos][1] #Update Cell opacity = cellOpacity - tailSize if cellOpacity >= tailSize else", "screenBottom): tailSize = 10 keys = self.dictionary.keys() for cellPos in list(reversed(keys)): cellIMG =", "cellIMG if randint(0, (opacity//20)**2) <= 1 else randint(0,4) self.dictionary[cellPos] = (cellIMG, opacity) #", "random.randint(screenLeft, screenRight) self.dictionary[screenTop, column] = (1, 255) def update(self, screenBottom): tailSize = 10", "from random import randint class State(): def __init__(self): self.dictionary = {} #{(row, col):", "list(reversed(keys)): cellIMG = self.dictionary[cellPos][0] cellOpacity = self.dictionary[cellPos][1] #Update Cell opacity = cellOpacity -", "= random.randint(screenLeft, screenRight) self.dictionary[screenTop, column] = (1, 255) def update(self, screenBottom): tailSize =", "= top - 1 screenLeft = -width // 2 screenRight = width //", "cellIMG = cellIMG if randint(0, (opacity//20)**2) <= 1 else randint(0,4) self.dictionary[cellPos] = (cellIMG,", "= {} #{(row, col): (pieceIMG, brightness)} def addDrop(self, width, top): screenTop = top", "// 2 column = random.randint(screenLeft, screenRight) self.dictionary[screenTop, column] = (1, 255) def update(self,", "(cellPos[0] + 1, cellPos[1]) if nextCell not in self.dictionary: self.dictionary[nextCell] = (randint(0,4), 255)", "to next bottom if cellPos[0] <= screenBottom: nextCell = (cellPos[0] + 1, cellPos[1])", "cellPos in list(reversed(keys)): cellIMG = self.dictionary[cellPos][0] cellOpacity = self.dictionary[cellPos][1] #Update Cell opacity =", "nextCell = (cellPos[0] + 1, cellPos[1]) if nextCell not in self.dictionary: self.dictionary[nextCell] =", "random import randint class State(): def __init__(self): self.dictionary = {} #{(row, col): (pieceIMG,", "col): (pieceIMG, brightness)} def addDrop(self, width, top): screenTop = top - 1 screenLeft", "else randint(0,4) self.dictionary[cellPos] = (cellIMG, opacity) # Add white to next bottom if", "State(): def __init__(self): self.dictionary = {} #{(row, col): (pieceIMG, brightness)} def addDrop(self, width,", "2 column = random.randint(screenLeft, screenRight) self.dictionary[screenTop, column] = (1, 255) def update(self, screenBottom):", "= width // 2 column = random.randint(screenLeft, screenRight) self.dictionary[screenTop, column] = (1, 255)", "- tailSize if cellOpacity >= tailSize else 0 cellIMG = cellIMG if randint(0,", "1 screenLeft = -width // 2 screenRight = width // 2 column =", 
"randint class State(): def __init__(self): self.dictionary = {} #{(row, col): (pieceIMG, brightness)} def", "self.dictionary: self.dictionary[nextCell] = (randint(0,4), 255) # Deleting cells if cellOpacity < tailSize: #del", "else 0 cellIMG = cellIMG if randint(0, (opacity//20)**2) <= 1 else randint(0,4) self.dictionary[cellPos]", "cellPos[1]) if nextCell not in self.dictionary: self.dictionary[nextCell] = (randint(0,4), 255) # Deleting cells", "+ 1, cellPos[1]) if nextCell not in self.dictionary: self.dictionary[nextCell] = (randint(0,4), 255) #", "in list(reversed(keys)): cellIMG = self.dictionary[cellPos][0] cellOpacity = self.dictionary[cellPos][1] #Update Cell opacity = cellOpacity", "Add white to next bottom if cellPos[0] <= screenBottom: nextCell = (cellPos[0] +", "randint(0,4) self.dictionary[cellPos] = (cellIMG, opacity) # Add white to next bottom if cellPos[0]", "update(self, screenBottom): tailSize = 10 keys = self.dictionary.keys() for cellPos in list(reversed(keys)): cellIMG", "1 else randint(0,4) self.dictionary[cellPos] = (cellIMG, opacity) # Add white to next bottom", "top - 1 screenLeft = -width // 2 screenRight = width // 2", "width, top): screenTop = top - 1 screenLeft = -width // 2 screenRight" ]
[ "# set config app.logger.setLevel(logging.INFO) from src.controller import excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def healthcheck():", "'payload', None) or ()) response_dict['message'] = str(error) response_dict['traceback'] = trace response = jsonify(response_dict)", "= dict(getattr(error, 'payload', None) or ()) response_dict['message'] = str(error) response_dict['traceback'] = trace response", "__name__, template_folder='../templates' ) # set config app.logger.setLevel(logging.INFO) from src.controller import excel_service app.register_blueprint(excel_service, url_prefix='/')", "status_code = getattr(error, 'status_code', 400) response_dict = dict(getattr(error, 'payload', None) or ()) response_dict['message']", "str(error) response_dict['traceback'] = trace response = jsonify(response_dict) response.status_code = status_code traceback.print_exc(file=sys.stdout) return response", "dict(getattr(error, 'payload', None) or ()) response_dict['message'] = str(error) response_dict['traceback'] = trace response =", "response_dict['message'] = str(error) response_dict['traceback'] = trace response = jsonify(response_dict) response.status_code = status_code traceback.print_exc(file=sys.stdout)", "for flask cli @app.shell_context_processor def ctx(): return {'app': app} @app.errorhandler(Exception) def _error(error): trace", "def healthcheck(): return jsonify(\"ok\") # shell context for flask cli @app.shell_context_processor def ctx():", "flask cli @app.shell_context_processor def ctx(): return {'app': app} @app.errorhandler(Exception) def _error(error): trace =", "getattr(error, 'status_code', 400) response_dict = dict(getattr(error, 'payload', None) or ()) response_dict['message'] = str(error)", "response_dict['traceback'] = trace response = jsonify(response_dict) response.status_code = status_code traceback.print_exc(file=sys.stdout) return response return", "set config app.logger.setLevel(logging.INFO) from src.controller import excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def healthcheck(): return", "import excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def healthcheck(): return jsonify(\"ok\") # shell context for", "response_dict = dict(getattr(error, 'payload', None) or ()) response_dict['message'] = str(error) response_dict['traceback'] = trace", "400) response_dict = dict(getattr(error, 'payload', None) or ()) response_dict['message'] = str(error) response_dict['traceback'] =", "template_folder='../templates' ) # set config app.logger.setLevel(logging.INFO) from src.controller import excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck')", "_error(error): trace = traceback.format_exc() status_code = getattr(error, 'status_code', 400) response_dict = dict(getattr(error, 'payload',", "from flask import Flask, jsonify def create_app(script_info=None): # instantiate the app app =", "or ()) response_dict['message'] = str(error) response_dict['traceback'] = trace response = jsonify(response_dict) response.status_code =", "app.logger.setLevel(logging.INFO) from src.controller import excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def healthcheck(): return jsonify(\"ok\") #", "healthcheck(): return jsonify(\"ok\") # shell context for flask cli @app.shell_context_processor def ctx(): return", "jsonify def create_app(script_info=None): # instantiate the app 
app = Flask( __name__, template_folder='../templates' )", "sys import traceback from flask import Flask, jsonify def create_app(script_info=None): # instantiate the", "traceback.format_exc() status_code = getattr(error, 'status_code', 400) response_dict = dict(getattr(error, 'payload', None) or ())", "ctx(): return {'app': app} @app.errorhandler(Exception) def _error(error): trace = traceback.format_exc() status_code = getattr(error,", "app} @app.errorhandler(Exception) def _error(error): trace = traceback.format_exc() status_code = getattr(error, 'status_code', 400) response_dict", "def create_app(script_info=None): # instantiate the app app = Flask( __name__, template_folder='../templates' ) #", "trace = traceback.format_exc() status_code = getattr(error, 'status_code', 400) response_dict = dict(getattr(error, 'payload', None)", "Flask( __name__, template_folder='../templates' ) # set config app.logger.setLevel(logging.INFO) from src.controller import excel_service app.register_blueprint(excel_service,", "@app.route('/healthcheck') def healthcheck(): return jsonify(\"ok\") # shell context for flask cli @app.shell_context_processor def", "{'app': app} @app.errorhandler(Exception) def _error(error): trace = traceback.format_exc() status_code = getattr(error, 'status_code', 400)", "import sys import traceback from flask import Flask, jsonify def create_app(script_info=None): # instantiate", "@app.shell_context_processor def ctx(): return {'app': app} @app.errorhandler(Exception) def _error(error): trace = traceback.format_exc() status_code", "def ctx(): return {'app': app} @app.errorhandler(Exception) def _error(error): trace = traceback.format_exc() status_code =", "import traceback from flask import Flask, jsonify def create_app(script_info=None): # instantiate the app", "= trace response = jsonify(response_dict) response.status_code = status_code traceback.print_exc(file=sys.stdout) return response return app", "= Flask( __name__, template_folder='../templates' ) # set config app.logger.setLevel(logging.INFO) from src.controller import excel_service", "instantiate the app app = Flask( __name__, template_folder='../templates' ) # set config app.logger.setLevel(logging.INFO)", "app = Flask( __name__, template_folder='../templates' ) # set config app.logger.setLevel(logging.INFO) from src.controller import", "src.controller import excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def healthcheck(): return jsonify(\"ok\") # shell context", "shell context for flask cli @app.shell_context_processor def ctx(): return {'app': app} @app.errorhandler(Exception) def", "context for flask cli @app.shell_context_processor def ctx(): return {'app': app} @app.errorhandler(Exception) def _error(error):", "= str(error) response_dict['traceback'] = trace response = jsonify(response_dict) response.status_code = status_code traceback.print_exc(file=sys.stdout) return", "'status_code', 400) response_dict = dict(getattr(error, 'payload', None) or ()) response_dict['message'] = str(error) response_dict['traceback']", "= getattr(error, 'status_code', 400) response_dict = dict(getattr(error, 'payload', None) or ()) response_dict['message'] =", "@app.errorhandler(Exception) def _error(error): trace = traceback.format_exc() status_code = getattr(error, 'status_code', 400) response_dict =", "# shell context for flask cli @app.shell_context_processor def ctx(): return {'app': app} @app.errorhandler(Exception)", "return {'app': app} @app.errorhandler(Exception) def 
_error(error): trace = traceback.format_exc() status_code = getattr(error, 'status_code',", "import logging import sys import traceback from flask import Flask, jsonify def create_app(script_info=None):", "config app.logger.setLevel(logging.INFO) from src.controller import excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def healthcheck(): return jsonify(\"ok\")", "app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def healthcheck(): return jsonify(\"ok\") # shell context for flask cli", "the app app = Flask( __name__, template_folder='../templates' ) # set config app.logger.setLevel(logging.INFO) from", "logging import sys import traceback from flask import Flask, jsonify def create_app(script_info=None): #", "import Flask, jsonify def create_app(script_info=None): # instantiate the app app = Flask( __name__,", "Flask, jsonify def create_app(script_info=None): # instantiate the app app = Flask( __name__, template_folder='../templates'", "traceback from flask import Flask, jsonify def create_app(script_info=None): # instantiate the app app", "from src.controller import excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def healthcheck(): return jsonify(\"ok\") # shell", "excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def healthcheck(): return jsonify(\"ok\") # shell context for flask", "= traceback.format_exc() status_code = getattr(error, 'status_code', 400) response_dict = dict(getattr(error, 'payload', None) or", "url_prefix='/') @app.route('/healthcheck') def healthcheck(): return jsonify(\"ok\") # shell context for flask cli @app.shell_context_processor", "return jsonify(\"ok\") # shell context for flask cli @app.shell_context_processor def ctx(): return {'app':", "jsonify(\"ok\") # shell context for flask cli @app.shell_context_processor def ctx(): return {'app': app}", "flask import Flask, jsonify def create_app(script_info=None): # instantiate the app app = Flask(", "# instantiate the app app = Flask( __name__, template_folder='../templates' ) # set config", "cli @app.shell_context_processor def ctx(): return {'app': app} @app.errorhandler(Exception) def _error(error): trace = traceback.format_exc()", "None) or ()) response_dict['message'] = str(error) response_dict['traceback'] = trace response = jsonify(response_dict) response.status_code", "()) response_dict['message'] = str(error) response_dict['traceback'] = trace response = jsonify(response_dict) response.status_code = status_code", "app app = Flask( __name__, template_folder='../templates' ) # set config app.logger.setLevel(logging.INFO) from src.controller", "def _error(error): trace = traceback.format_exc() status_code = getattr(error, 'status_code', 400) response_dict = dict(getattr(error,", ") # set config app.logger.setLevel(logging.INFO) from src.controller import excel_service app.register_blueprint(excel_service, url_prefix='/') @app.route('/healthcheck') def", "create_app(script_info=None): # instantiate the app app = Flask( __name__, template_folder='../templates' ) # set" ]
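A hedged smoke test using Flask's built-in test client, assuming `src.controller` is importable in the target project (the blueprint import happens inside the factory):

app = create_app()
with app.test_client() as client:
    resp = client.get('/healthcheck')
    assert resp.status_code == 200
    assert resp.get_json() == "ok"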
[ "if message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr) return local_server_host = '127.0.0.1' public_server_port =", "public_server_port = message[\"public_server_port\"] private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} →", "websockets.connect(ws_uri, ssl=ssl_context) as websocket: message = json.loads(await websocket.recv()) if message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING:", "f\"[bold yellow]WARNING: {message['warning']}\", file=sys.stderr) if message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr) return local_server_host", "import print as pretty_print from .tcp import Client ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async", "import ssl import json import certifi import threading import websockets from rich import", "async with websockets.connect(ws_uri, ssl=ssl_context) as websocket: message = json.loads(await websocket.recv()) if message.get(\"warning\"): pretty_print(", "ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async with websockets.connect(ws_uri, ssl=ssl_context) as websocket:", "yellow]ERROR: {message['error']}\", file=sys.stderr) return local_server_host = '127.0.0.1' public_server_port = message[\"public_server_port\"] private_server_port = message[\"private_server_port\"]", "json import certifi import threading import websockets from rich import print as pretty_print", "remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, ) while True: message = json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO:", "print as pretty_print from .tcp import Client ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def", "open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async with websockets.connect(ws_uri, ssl=ssl_context) as websocket: message = json.loads(await websocket.recv())", "= json.loads(await websocket.recv()) if message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING: {message['warning']}\", file=sys.stderr) if message.get(\"error\"): pretty_print(", "json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO: [bold white] New Connection +1\") threading.Thread( target=client.process, args=(message, websocket)", "= message[\"public_server_port\"] private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\")", "yellow]WARNING: {message['warning']}\", file=sys.stderr) if message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr) return local_server_host =", "message = json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO: [bold white] New Connection +1\") threading.Thread( target=client.process,", "import websockets from rich import print as pretty_print from .tcp import Client ssl_context", "pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold 
cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client = Client( remote_server_host=remote_server_host, remote_server_port=private_server_port,", "as pretty_print from .tcp import Client ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri,", "websocket.recv()) if message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING: {message['warning']}\", file=sys.stderr) if message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR:", "<reponame>AbduazizZiyodov/jprq-python-client import sys import ssl import json import certifi import threading import websockets", "def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async with websockets.connect(ws_uri, ssl=ssl_context) as websocket: message = json.loads(await", "websocket.recv()) pretty_print(\"[bold green]INFO: [bold white] New Connection +1\") threading.Thread( target=client.process, args=(message, websocket) ).start()", "local_server_host = '127.0.0.1' public_server_port = message[\"public_server_port\"] private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print(", "127.0.0.1:{local_server_port}'}\") client = Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, ) while True: message =", "certifi import threading import websockets from rich import print as pretty_print from .tcp", "message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING: {message['warning']}\", file=sys.stderr) if message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr)", "threading import websockets from rich import print as pretty_print from .tcp import Client", "True: message = json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO: [bold white] New Connection +1\") threading.Thread(", "websockets from rich import print as pretty_print from .tcp import Client ssl_context =", "{message['error']}\", file=sys.stderr) return local_server_host = '127.0.0.1' public_server_port = message[\"public_server_port\"] private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel", "import json import certifi import threading import websockets from rich import print as", "as websocket: message = json.loads(await websocket.recv()) if message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING: {message['warning']}\", file=sys.stderr)", "remote_server_host, local_server_port): async with websockets.connect(ws_uri, ssl=ssl_context) as websocket: message = json.loads(await websocket.recv()) if", "file=sys.stderr) if message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr) return local_server_host = '127.0.0.1' public_server_port", "green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client = Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port,", "return local_server_host = '127.0.0.1' public_server_port = message[\"public_server_port\"] private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\")", "json.loads(await websocket.recv()) if message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING: 
{message['warning']}\", file=sys.stderr) if message.get(\"error\"): pretty_print( f\"[bold", "import threading import websockets from rich import print as pretty_print from .tcp import", "f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client = Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, ) while", "import certifi import threading import websockets from rich import print as pretty_print from", ") while True: message = json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO: [bold white] New Connection", "pretty_print( f\"[bold yellow]WARNING: {message['warning']}\", file=sys.stderr) if message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr) return", "local_server_port=local_server_port, ) while True: message = json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO: [bold white] New", "f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr) return local_server_host = '127.0.0.1' public_server_port = message[\"public_server_port\"] private_server_port =", "sys import ssl import json import certifi import threading import websockets from rich", ".tcp import Client ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async", "import Client ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async with", "ssl=ssl_context) as websocket: message = json.loads(await websocket.recv()) if message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING: {message['warning']}\",", "async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async with websockets.connect(ws_uri, ssl=ssl_context) as websocket: message =", "message[\"public_server_port\"] private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client", "ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async with websockets.connect(ws_uri, ssl=ssl_context)", "Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client = Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host,", "while True: message = json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO: [bold white] New Connection +1\")", "remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, ) while True: message = json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO: [bold", "local_server_port): async with websockets.connect(ws_uri, ssl=ssl_context) as websocket: message = json.loads(await websocket.recv()) if message.get(\"warning\"):", "Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, ) while True: message = json.loads(await 
websocket.recv()) pretty_print(\"[bold", "= ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async with websockets.connect(ws_uri, ssl=ssl_context) as", "= '127.0.0.1' public_server_port = message[\"public_server_port\"] private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold", "from rich import print as pretty_print from .tcp import Client ssl_context = ssl.create_default_context()", "client = Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, ) while True: message = json.loads(await", "file=sys.stderr) return local_server_host = '127.0.0.1' public_server_port = message[\"public_server_port\"] private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold", "from .tcp import Client ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port):", "→ 127.0.0.1:{local_server_port}'}\") client = Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, ) while True: message", "cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client = Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, ) while True:", "if message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING: {message['warning']}\", file=sys.stderr) if message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR: {message['error']}\",", "= Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, ) while True: message = json.loads(await websocket.recv())", "pretty_print( f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr) return local_server_host = '127.0.0.1' public_server_port = message[\"public_server_port\"] private_server_port", "with websockets.connect(ws_uri, ssl=ssl_context) as websocket: message = json.loads(await websocket.recv()) if message.get(\"warning\"): pretty_print( f\"[bold", "websocket: message = json.loads(await websocket.recv()) if message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING: {message['warning']}\", file=sys.stderr) if", "= json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO: [bold white] New Connection +1\") threading.Thread( target=client.process, args=(message,", "pretty_print from .tcp import Client ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri, remote_server_host,", "import sys import ssl import json import certifi import threading import websockets from", "= message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client = Client(", "local_server_host=local_server_host, local_server_port=local_server_port, ) while True: message = json.loads(await websocket.recv()) pretty_print(\"[bold green]INFO: [bold white]", "message = 
json.loads(await websocket.recv()) if message.get(\"warning\"): pretty_print( f\"[bold yellow]WARNING: {message['warning']}\", file=sys.stderr) if message.get(\"error\"):", "message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr) return local_server_host = '127.0.0.1' public_server_port = message[\"public_server_port\"]", "rich import print as pretty_print from .tcp import Client ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where())", "ssl import json import certifi import threading import websockets from rich import print", "message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client = Client( remote_server_host=remote_server_host,", "private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client =", "ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async with websockets.connect(ws_uri, ssl=ssl_context) as websocket: message", "pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port} → 127.0.0.1:{local_server_port}'}\") client = Client( remote_server_host=remote_server_host, remote_server_port=private_server_port, local_server_host=local_server_host, local_server_port=local_server_port, )", "'127.0.0.1' public_server_port = message[\"public_server_port\"] private_server_port = message[\"private_server_port\"] pretty_print(f\"{'Tunnel Status:':<25}[bold green]Online\") pretty_print( f\"{'Forwarded:':<25}{f'[bold cyan]{remote_server_host}:{public_server_port}", "{message['warning']}\", file=sys.stderr) if message.get(\"error\"): pretty_print( f\"[bold yellow]ERROR: {message['error']}\", file=sys.stderr) return local_server_host = '127.0.0.1'", "Client ssl_context = ssl.create_default_context() ssl_context.load_verify_locations(certifi.where()) async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port): async with websockets.connect(ws_uri," ]
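A hedged entry-point sketch; the WebSocket URI and host below are hypothetical placeholders, since this snippet does not show the real jprq endpoint format:

import asyncio

async def main():
    await open_tcp_tunnel(
        ws_uri="wss://example-jprq-server.dev/_ws/",   # assumed endpoint
        remote_server_host="example-jprq-server.dev",  # assumed host
        local_server_port=8000,
    )

if __name__ == "__main__":
    asyncio.run(main())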
[ "for l in range(0, 3): for c in range(0, 3): print(f'[{matriz[l][c]:^5}]', end='') print()", "range(0, 3): print(f'[{matriz[l][c]:^5}]', end='') print() soma = matriz[0][2] + matriz[1][2] + matriz[2][2] print('='", "print(f'[{matriz[l][c]:^5}]', end='') print() soma = matriz[0][2] + matriz[1][2] + matriz[2][2] print('=' * 30)", "+ matriz[2][2] print('=' * 30) print(f'A soma de todos os valores pares digitados", "[0, 0, 0]] par = [] maior = 0 for l in range(0,", "[] maior = 0 for l in range(0, 3): for c in range(0,", "in range(0, 3): for c in range(0, 3): matriz[l][c] = int(input(f'Digite um valor", "for c in range(0, 3): matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]: '))", "range(0, 3): for c in range(0, 3): print(f'[{matriz[l][c]:^5}]', end='') print() soma = matriz[0][2]", "[0, 0, 0], [0, 0, 0]] par = [] maior = 0 for", "* 30) print(f'A soma de todos os valores pares digitados foi {sum(par)}') print(f'A", "')) if matriz[l][c] % 2 == 0: par.append(matriz[l][c]) print('=' * 30) for l", "c in range(0, 3): print(f'[{matriz[l][c]:^5}]', end='') print() soma = matriz[0][2] + matriz[1][2] +", "print(f'A soma dos valores da terceira coluna foi {soma}') print(f'E o maior valor", "de todos os valores pares digitados foi {sum(par)}') print(f'A soma dos valores da", "[{l},{c}]: ')) if matriz[l][c] % 2 == 0: par.append(matriz[l][c]) print('=' * 30) for", "valor para [{l},{c}]: ')) if matriz[l][c] % 2 == 0: par.append(matriz[l][c]) print('=' *", "para [{l},{c}]: ')) if matriz[l][c] % 2 == 0: par.append(matriz[l][c]) print('=' * 30)", "print('=' * 30) print(f'A soma de todos os valores pares digitados foi {sum(par)}')", "matriz[2][2] print('=' * 30) print(f'A soma de todos os valores pares digitados foi", "0, 0]] par = [] maior = 0 for l in range(0, 3):", "3): matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]: ')) if matriz[l][c] % 2", "for l in range(0, 3): for c in range(0, 3): matriz[l][c] = int(input(f'Digite", "0, 0], [0, 0, 0], [0, 0, 0]] par = [] maior =", "matriz[l][c] % 2 == 0: par.append(matriz[l][c]) print('=' * 30) for l in range(0,", "0 for l in range(0, 3): for c in range(0, 3): matriz[l][c] =", "os valores pares digitados foi {sum(par)}') print(f'A soma dos valores da terceira coluna", "matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]] par =", "% 2 == 0: par.append(matriz[l][c]) print('=' * 30) for l in range(0, 3):", "par = [] maior = 0 for l in range(0, 3): for c", "0], [0, 0, 0], [0, 0, 0]] par = [] maior = 0", "3): for c in range(0, 3): print(f'[{matriz[l][c]:^5}]', end='') print() soma = matriz[0][2] +", "0: par.append(matriz[l][c]) print('=' * 30) for l in range(0, 3): for c in", "in range(0, 3): matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]: ')) if matriz[l][c]", "== 0: par.append(matriz[l][c]) print('=' * 30) for l in range(0, 3): for c", "matriz[0][2] + matriz[1][2] + matriz[2][2] print('=' * 30) print(f'A soma de todos os", "= 0 for l in range(0, 3): for c in range(0, 3): matriz[l][c]", "valores da terceira coluna foi {soma}') print(f'E o maior valor da segunda linha", "end='') print() soma = matriz[0][2] + matriz[1][2] + matriz[2][2] print('=' * 30) print(f'A", "matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]: ')) if matriz[l][c] % 2 ==", "digitados foi {sum(par)}') print(f'A soma dos valores da terceira coluna foi {soma}') print(f'E", "for c in range(0, 3): print(f'[{matriz[l][c]:^5}]', end='') print() soma = matriz[0][2] + matriz[1][2]", "* 30) for l in range(0, 3): for c in range(0, 3): print(f'[{matriz[l][c]:^5}]',", "[[0, 0, 0], [0, 0, 
0], [0, 0, 0]] par = [] maior", "in range(0, 3): print(f'[{matriz[l][c]:^5}]', end='') print() soma = matriz[0][2] + matriz[1][2] + matriz[2][2]", "par.append(matriz[l][c]) print('=' * 30) for l in range(0, 3): for c in range(0,", "range(0, 3): for c in range(0, 3): matriz[l][c] = int(input(f'Digite um valor para", "range(0, 3): matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]: ')) if matriz[l][c] %", "print() soma = matriz[0][2] + matriz[1][2] + matriz[2][2] print('=' * 30) print(f'A soma", "um valor para [{l},{c}]: ')) if matriz[l][c] % 2 == 0: par.append(matriz[l][c]) print('='", "30) for l in range(0, 3): for c in range(0, 3): print(f'[{matriz[l][c]:^5}]', end='')", "print(f'A soma de todos os valores pares digitados foi {sum(par)}') print(f'A soma dos", "{sum(par)}') print(f'A soma dos valores da terceira coluna foi {soma}') print(f'E o maior", "matriz[1][2] + matriz[2][2] print('=' * 30) print(f'A soma de todos os valores pares", "soma = matriz[0][2] + matriz[1][2] + matriz[2][2] print('=' * 30) print(f'A soma de", "<reponame>honeyhugh/PythonCurso<filename>ex087.py matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]] par", "= matriz[0][2] + matriz[1][2] + matriz[2][2] print('=' * 30) print(f'A soma de todos", "0]] par = [] maior = 0 for l in range(0, 3): for", "soma dos valores da terceira coluna foi {soma}') print(f'E o maior valor da", "30) print(f'A soma de todos os valores pares digitados foi {sum(par)}') print(f'A soma", "+ matriz[1][2] + matriz[2][2] print('=' * 30) print(f'A soma de todos os valores", "pares digitados foi {sum(par)}') print(f'A soma dos valores da terceira coluna foi {soma}')", "if matriz[l][c] % 2 == 0: par.append(matriz[l][c]) print('=' * 30) for l in", "dos valores da terceira coluna foi {soma}') print(f'E o maior valor da segunda", "0], [0, 0, 0]] par = [] maior = 0 for l in", "in range(0, 3): for c in range(0, 3): print(f'[{matriz[l][c]:^5}]', end='') print() soma =", "= int(input(f'Digite um valor para [{l},{c}]: ')) if matriz[l][c] % 2 == 0:", "= [] maior = 0 for l in range(0, 3): for c in", "3): for c in range(0, 3): matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]:", "c in range(0, 3): matriz[l][c] = int(input(f'Digite um valor para [{l},{c}]: ')) if", "da terceira coluna foi {soma}') print(f'E o maior valor da segunda linha foi", "l in range(0, 3): for c in range(0, 3): print(f'[{matriz[l][c]:^5}]', end='') print() soma", "3): print(f'[{matriz[l][c]:^5}]', end='') print() soma = matriz[0][2] + matriz[1][2] + matriz[2][2] print('=' *", "print('=' * 30) for l in range(0, 3): for c in range(0, 3):", "valores pares digitados foi {sum(par)}') print(f'A soma dos valores da terceira coluna foi", "= [[0, 0, 0], [0, 0, 0], [0, 0, 0]] par = []", "int(input(f'Digite um valor para [{l},{c}]: ')) if matriz[l][c] % 2 == 0: par.append(matriz[l][c])", "l in range(0, 3): for c in range(0, 3): matriz[l][c] = int(input(f'Digite um", "0, 0], [0, 0, 0]] par = [] maior = 0 for l", "soma de todos os valores pares digitados foi {sum(par)}') print(f'A soma dos valores", "todos os valores pares digitados foi {sum(par)}') print(f'A soma dos valores da terceira", "terceira coluna foi {soma}') print(f'E o maior valor da segunda linha foi {max(matriz[1][:])}')", "maior = 0 for l in range(0, 3): for c in range(0, 3):", "2 == 0: par.append(matriz[l][c]) print('=' * 30) for l in range(0, 3): for", "foi {sum(par)}') print(f'A soma dos valores da terceira coluna foi {soma}') print(f'E o" ]
[ "processed image. Args: image (np.ndarray): original image bboxes (List): detected bounding box scores", "Tuple, List import cv2 import numpy as np import streamlit as st from", "-> Optional[np.ndarray]: \"\"\"show upload image area Returns: Optional[np.ndarray]: uploaded image \"\"\" uploaded_file =", "is not None: file_bytes = np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image = cv2.imdecode(file_bytes, 1) return", "(str): header message description (str): description text \"\"\" st.subheader(header) st.markdown(description) def object_detector_ui() ->", "image (np.ndarray): original image bboxes (List): detected bounding box scores (List): detected score", "return num_thread, model_type, confidence_threshold def upload_image() -> Optional[np.ndarray]: \"\"\"show upload image area Returns:", "image else: return None def show_image(image: np.ndarray, bboxes: List, scores: List, classes: List,", "import Image from utils.model import MODEL_TYPE, draw_bboxes def description(header: str, description: str): \"\"\"show", "Tuple[int, str, float]: \"\"\"show object detector ui in sidebar Returns: Tuple[int, str, float]:", "Thread\", 1, 4, 1, 1) confidence_threshold = st.sidebar.slider( \"Confidence threshold\", 0.0, 1.0, 0.5,", "new_image.shape[2] == 3: # カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2] == 4:", "in sidebar Returns: Tuple[int, str, float]: [number of threads, model type string, threshold]", "new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2] == 4: # 透過 new_image = cv2.cvtColor(new_image,", "st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"JPG\"]) if uploaded_file is not None: file_bytes = np.asarray(", "detected bounding box scores (List): detected score classes (List): detected class names detect_num", "upload_image() -> Optional[np.ndarray]: \"\"\"show upload image area Returns: Optional[np.ndarray]: uploaded image \"\"\" uploaded_file", "1, 1) confidence_threshold = st.sidebar.slider( \"Confidence threshold\", 0.0, 1.0, 0.5, 0.01) model_type =", "detected score classes (List): detected class names detect_num (int): number of detection elapsed_time", "Optional, Tuple, List import cv2 import numpy as np import streamlit as st", "+ \"[msec]**\") pass def cv2pil(image: np.ndarray) -> Image: \"\"\"cv2 image to PIL image", "pass elif new_image.shape[2] == 3: # カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2]", "def description(header: str, description: str): \"\"\"show description Args: header (str): header message description", "= draw_bboxes(image, bboxes, scores, classes, detect_num) image = cv2pil(image) st.image(image, caption='Uploaded Image.', use_column_width=True)", "(List): detected bounding box scores (List): detected score classes (List): detected class names", "score classes (List): detected class names detect_num (int): number of detection elapsed_time (int):", "type=[\"jpg\", \"JPG\"]) if uploaded_file is not None: file_bytes = np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image", "classes, detect_num) image = cv2pil(image) st.image(image, caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed time : \"", "List, detect_num: int, elapsed_time: int): \"\"\"show processed image. 
Args: image (np.ndarray): original image", "st.sidebar.radio(\"Model Type\", MODEL_TYPE) return num_thread, model_type, confidence_threshold def upload_image() -> Optional[np.ndarray]: \"\"\"show upload", "elapsed_time (int): processing time \"\"\" image = draw_bboxes(image, bboxes, scores, classes, detect_num) image", "Returns: Tuple[int, str, float]: [number of threads, model type string, threshold] \"\"\" st.sidebar.markdown(\"#", "elapsed_time: int): \"\"\"show processed image. Args: image (np.ndarray): original image bboxes (List): detected", "from typing import Optional, Tuple, List import cv2 import numpy as np import", "Args: image (np.ndarray): original image bboxes (List): detected bounding box scores (List): detected", "3: # カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2] == 4: # 透過", "description (str): description text \"\"\" st.subheader(header) st.markdown(description) def object_detector_ui() -> Tuple[int, str, float]:", "(np.ndarray): original image bboxes (List): detected bounding box scores (List): detected score classes", "detect_num: int, elapsed_time: int): \"\"\"show processed image. Args: image (np.ndarray): original image bboxes", "threshold] \"\"\" st.sidebar.markdown(\"# Model Config\") num_thread = st.sidebar.slider(\"Number of Thread\", 1, 4, 1,", "draw_bboxes(image, bboxes, scores, classes, detect_num) image = cv2pil(image) st.image(image, caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed", "image area Returns: Optional[np.ndarray]: uploaded image \"\"\" uploaded_file = st.file_uploader(\"Choose an image...\", type=[\"jpg\",", "List import cv2 import numpy as np import streamlit as st from PIL", "= image.copy() if new_image.ndim == 2: # モノクロ pass elif new_image.shape[2] == 3:", "\"\"\" image = draw_bboxes(image, bboxes, scores, classes, detect_num) image = cv2pil(image) st.image(image, caption='Uploaded", "\"Confidence threshold\", 0.0, 1.0, 0.5, 0.01) model_type = st.sidebar.radio(\"Model Type\", MODEL_TYPE) return num_thread,", "== 3: # カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2] == 4: #", "cv2 import numpy as np import streamlit as st from PIL import Image", "as np import streamlit as st from PIL import Image from utils.model import", "st.sidebar.slider( \"Confidence threshold\", 0.0, 1.0, 0.5, 0.01) model_type = st.sidebar.radio(\"Model Type\", MODEL_TYPE) return", "Model Config\") num_thread = st.sidebar.slider(\"Number of Thread\", 1, 4, 1, 1) confidence_threshold =", "\"\"\" st.sidebar.markdown(\"# Model Config\") num_thread = st.sidebar.slider(\"Number of Thread\", 1, 4, 1, 1)", "scores: List, classes: List, detect_num: int, elapsed_time: int): \"\"\"show processed image. 
Args: image", "detector ui in sidebar Returns: Tuple[int, str, float]: [number of threads, model type", "(List): detected class names detect_num (int): number of detection elapsed_time (int): processing time", "-*- from typing import Optional, Tuple, List import cv2 import numpy as np", "MODEL_TYPE) return num_thread, model_type, confidence_threshold def upload_image() -> Optional[np.ndarray]: \"\"\"show upload image area", "def upload_image() -> Optional[np.ndarray]: \"\"\"show upload image area Returns: Optional[np.ndarray]: uploaded image \"\"\"", "uploaded image \"\"\" uploaded_file = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"JPG\"]) if uploaded_file is", "import Optional, Tuple, List import cv2 import numpy as np import streamlit as", "threads, model type string, threshold] \"\"\" st.sidebar.markdown(\"# Model Config\") num_thread = st.sidebar.slider(\"Number of", "= st.sidebar.radio(\"Model Type\", MODEL_TYPE) return num_thread, model_type, confidence_threshold def upload_image() -> Optional[np.ndarray]: \"\"\"show", "upload image area Returns: Optional[np.ndarray]: uploaded image \"\"\" uploaded_file = st.file_uploader(\"Choose an image...\",", "import numpy as np import streamlit as st from PIL import Image from", "uploaded_file = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"JPG\"]) if uploaded_file is not None: file_bytes", "Returns: Image: PIL image \"\"\" new_image = image.copy() if new_image.ndim == 2: #", "else: return None def show_image(image: np.ndarray, bboxes: List, scores: List, classes: List, detect_num:", "float]: \"\"\"show object detector ui in sidebar Returns: Tuple[int, str, float]: [number of", "text \"\"\" st.subheader(header) st.markdown(description) def object_detector_ui() -> Tuple[int, str, float]: \"\"\"show object detector", "bounding box scores (List): detected score classes (List): detected class names detect_num (int):", "scores (List): detected score classes (List): detected class names detect_num (int): number of", "(int): number of detection elapsed_time (int): processing time \"\"\" image = draw_bboxes(image, bboxes,", "[number of threads, model type string, threshold] \"\"\" st.sidebar.markdown(\"# Model Config\") num_thread =", "st.markdown(\"**elapsed time : \" + str(elapsed_time) + \"[msec]**\") pass def cv2pil(image: np.ndarray) ->", "import streamlit as st from PIL import Image from utils.model import MODEL_TYPE, draw_bboxes", "\" + str(elapsed_time) + \"[msec]**\") pass def cv2pil(image: np.ndarray) -> Image: \"\"\"cv2 image", "# モノクロ pass elif new_image.shape[2] == 3: # カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB)", "str, description: str): \"\"\"show description Args: header (str): header message description (str): description", "bytearray(uploaded_file.read()), dtype=np.uint8) image = cv2.imdecode(file_bytes, 1) return image else: return None def show_image(image:", "cv2pil(image: np.ndarray) -> Image: \"\"\"cv2 image to PIL image Args: image (np.ndarray): cv2", "-> Image: \"\"\"cv2 image to PIL image Args: image (np.ndarray): cv2 image Returns:", "List, scores: List, classes: List, detect_num: int, elapsed_time: int): \"\"\"show processed image. Args:", "Returns: Optional[np.ndarray]: uploaded image \"\"\" uploaded_file = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"JPG\"]) if", "image. 
Args: image (np.ndarray): original image bboxes (List): detected bounding box scores (List):", "image...\", type=[\"jpg\", \"JPG\"]) if uploaded_file is not None: file_bytes = np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8)", "model_type, confidence_threshold def upload_image() -> Optional[np.ndarray]: \"\"\"show upload image area Returns: Optional[np.ndarray]: uploaded", "int): \"\"\"show processed image. Args: image (np.ndarray): original image bboxes (List): detected bounding", "Image: \"\"\"cv2 image to PIL image Args: image (np.ndarray): cv2 image Returns: Image:", "int, elapsed_time: int): \"\"\"show processed image. Args: image (np.ndarray): original image bboxes (List):", "detected class names detect_num (int): number of detection elapsed_time (int): processing time \"\"\"", "show_image(image: np.ndarray, bboxes: List, scores: List, classes: List, detect_num: int, elapsed_time: int): \"\"\"show", "image.copy() if new_image.ndim == 2: # モノクロ pass elif new_image.shape[2] == 3: #", "2: # モノクロ pass elif new_image.shape[2] == 3: # カラー new_image = cv2.cvtColor(new_image,", "Config\") num_thread = st.sidebar.slider(\"Number of Thread\", 1, 4, 1, 1) confidence_threshold = st.sidebar.slider(", "str, float]: [number of threads, model type string, threshold] \"\"\" st.sidebar.markdown(\"# Model Config\")", "bboxes, scores, classes, detect_num) image = cv2pil(image) st.image(image, caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed time", "image to PIL image Args: image (np.ndarray): cv2 image Returns: Image: PIL image", "float]: [number of threads, model type string, threshold] \"\"\" st.sidebar.markdown(\"# Model Config\") num_thread", "st.sidebar.slider(\"Number of Thread\", 1, 4, 1, 1) confidence_threshold = st.sidebar.slider( \"Confidence threshold\", 0.0,", "if uploaded_file is not None: file_bytes = np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image = cv2.imdecode(file_bytes,", "detect_num (int): number of detection elapsed_time (int): processing time \"\"\" image = draw_bboxes(image,", "str(elapsed_time) + \"[msec]**\") pass def cv2pil(image: np.ndarray) -> Image: \"\"\"cv2 image to PIL", "to PIL image Args: image (np.ndarray): cv2 image Returns: Image: PIL image \"\"\"", "scores, classes, detect_num) image = cv2pil(image) st.image(image, caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed time :", "description: str): \"\"\"show description Args: header (str): header message description (str): description text", "-*- coding:utf-8 -*- from typing import Optional, Tuple, List import cv2 import numpy", "typing import Optional, Tuple, List import cv2 import numpy as np import streamlit", "st.subheader(header) st.markdown(description) def object_detector_ui() -> Tuple[int, str, float]: \"\"\"show object detector ui in", "not None: file_bytes = np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image = cv2.imdecode(file_bytes, 1) return image", "-> Tuple[int, str, float]: \"\"\"show object detector ui in sidebar Returns: Tuple[int, str,", "of threads, model type string, threshold] \"\"\" st.sidebar.markdown(\"# Model Config\") num_thread = st.sidebar.slider(\"Number", "cv2.imdecode(file_bytes, 1) return image else: return None def show_image(image: np.ndarray, bboxes: List, scores:", "\"\"\"show processed image. 
Args: image (np.ndarray): original image bboxes (List): detected bounding box", "description text \"\"\" st.subheader(header) st.markdown(description) def object_detector_ui() -> Tuple[int, str, float]: \"\"\"show object", "np import streamlit as st from PIL import Image from utils.model import MODEL_TYPE,", "def cv2pil(image: np.ndarray) -> Image: \"\"\"cv2 image to PIL image Args: image (np.ndarray):", "pass def cv2pil(image: np.ndarray) -> Image: \"\"\"cv2 image to PIL image Args: image", "object_detector_ui() -> Tuple[int, str, float]: \"\"\"show object detector ui in sidebar Returns: Tuple[int,", "Type\", MODEL_TYPE) return num_thread, model_type, confidence_threshold def upload_image() -> Optional[np.ndarray]: \"\"\"show upload image", "bboxes (List): detected bounding box scores (List): detected score classes (List): detected class", "processing time \"\"\" image = draw_bboxes(image, bboxes, scores, classes, detect_num) image = cv2pil(image)", "image Returns: Image: PIL image \"\"\" new_image = image.copy() if new_image.ndim == 2:", "= np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image = cv2.imdecode(file_bytes, 1) return image else: return None", "confidence_threshold = st.sidebar.slider( \"Confidence threshold\", 0.0, 1.0, 0.5, 0.01) model_type = st.sidebar.radio(\"Model Type\",", "threshold\", 0.0, 1.0, 0.5, 0.01) model_type = st.sidebar.radio(\"Model Type\", MODEL_TYPE) return num_thread, model_type,", "np.ndarray) -> Image: \"\"\"cv2 image to PIL image Args: image (np.ndarray): cv2 image", "4, 1, 1) confidence_threshold = st.sidebar.slider( \"Confidence threshold\", 0.0, 1.0, 0.5, 0.01) model_type", "def object_detector_ui() -> Tuple[int, str, float]: \"\"\"show object detector ui in sidebar Returns:", "# -*- coding:utf-8 -*- from typing import Optional, Tuple, List import cv2 import", "original image bboxes (List): detected bounding box scores (List): detected score classes (List):", "# カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2] == 4: # 透過 new_image", "image bboxes (List): detected bounding box scores (List): detected score classes (List): detected", "Args: header (str): header message description (str): description text \"\"\" st.subheader(header) st.markdown(description) def", "as st from PIL import Image from utils.model import MODEL_TYPE, draw_bboxes def description(header:", "return image else: return None def show_image(image: np.ndarray, bboxes: List, scores: List, classes:", "classes (List): detected class names detect_num (int): number of detection elapsed_time (int): processing", "Tuple[int, str, float]: [number of threads, model type string, threshold] \"\"\" st.sidebar.markdown(\"# Model", "None: file_bytes = np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image = cv2.imdecode(file_bytes, 1) return image else:", "(str): description text \"\"\" st.subheader(header) st.markdown(description) def object_detector_ui() -> Tuple[int, str, float]: \"\"\"show", "1) return image else: return None def show_image(image: np.ndarray, bboxes: List, scores: List,", "= cv2pil(image) st.image(image, caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed time : \" + str(elapsed_time) +", "1.0, 0.5, 0.01) model_type = st.sidebar.radio(\"Model Type\", MODEL_TYPE) return num_thread, model_type, confidence_threshold def", "image (np.ndarray): cv2 image Returns: Image: PIL image \"\"\" new_image = image.copy() if", "an image...\", type=[\"jpg\", \"JPG\"]) if uploaded_file is not None: file_bytes = 
np.asarray( bytearray(uploaded_file.read()),", "detect_num) image = cv2pil(image) st.image(image, caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed time : \" +", "cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2] == 4: # 透過 new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA) new_image", "np.ndarray, bboxes: List, scores: List, classes: List, detect_num: int, elapsed_time: int): \"\"\"show processed", "Optional[np.ndarray]: uploaded image \"\"\" uploaded_file = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"JPG\"]) if uploaded_file", "Args: image (np.ndarray): cv2 image Returns: Image: PIL image \"\"\" new_image = image.copy()", "\"\"\"show object detector ui in sidebar Returns: Tuple[int, str, float]: [number of threads,", "model type string, threshold] \"\"\" st.sidebar.markdown(\"# Model Config\") num_thread = st.sidebar.slider(\"Number of Thread\",", "model_type = st.sidebar.radio(\"Model Type\", MODEL_TYPE) return num_thread, model_type, confidence_threshold def upload_image() -> Optional[np.ndarray]:", "type string, threshold] \"\"\" st.sidebar.markdown(\"# Model Config\") num_thread = st.sidebar.slider(\"Number of Thread\", 1,", "image = cv2pil(image) st.image(image, caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed time : \" + str(elapsed_time)", "new_image.shape[2] == 4: # 透過 new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA) new_image = Image.fromarray(new_image) return", "description(header: str, description: str): \"\"\"show description Args: header (str): header message description (str):", "str, float]: \"\"\"show object detector ui in sidebar Returns: Tuple[int, str, float]: [number", "== 4: # 透過 new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA) new_image = Image.fromarray(new_image) return new_image", "\"\"\"show upload image area Returns: Optional[np.ndarray]: uploaded image \"\"\" uploaded_file = st.file_uploader(\"Choose an", "area Returns: Optional[np.ndarray]: uploaded image \"\"\" uploaded_file = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"JPG\"])", "of detection elapsed_time (int): processing time \"\"\" image = draw_bboxes(image, bboxes, scores, classes,", "utils.model import MODEL_TYPE, draw_bboxes def description(header: str, description: str): \"\"\"show description Args: header", "from PIL import Image from utils.model import MODEL_TYPE, draw_bboxes def description(header: str, description:", "detection elapsed_time (int): processing time \"\"\" image = draw_bboxes(image, bboxes, scores, classes, detect_num)", "new_image.ndim == 2: # モノクロ pass elif new_image.shape[2] == 3: # カラー new_image", "image = cv2.imdecode(file_bytes, 1) return image else: return None def show_image(image: np.ndarray, bboxes:", "uploaded_file is not None: file_bytes = np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image = cv2.imdecode(file_bytes, 1)", "PIL image \"\"\" new_image = image.copy() if new_image.ndim == 2: # モノクロ pass", "MODEL_TYPE, draw_bboxes def description(header: str, description: str): \"\"\"show description Args: header (str): header", "elif new_image.shape[2] == 3: # カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2] ==", "time \"\"\" image = draw_bboxes(image, bboxes, scores, classes, detect_num) image = cv2pil(image) st.image(image,", "bboxes: List, scores: List, classes: List, detect_num: int, elapsed_time: int): \"\"\"show processed image.", "ui in sidebar Returns: Tuple[int, str, float]: [number of threads, model 
type string,", "Image.', use_column_width=True) st.markdown(\"**elapsed time : \" + str(elapsed_time) + \"[msec]**\") pass def cv2pil(image:", "(np.ndarray): cv2 image Returns: Image: PIL image \"\"\" new_image = image.copy() if new_image.ndim", "0.0, 1.0, 0.5, 0.01) model_type = st.sidebar.radio(\"Model Type\", MODEL_TYPE) return num_thread, model_type, confidence_threshold", "= cv2.imdecode(file_bytes, 1) return image else: return None def show_image(image: np.ndarray, bboxes: List,", "= st.sidebar.slider(\"Number of Thread\", 1, 4, 1, 1) confidence_threshold = st.sidebar.slider( \"Confidence threshold\",", "caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed time : \" + str(elapsed_time) + \"[msec]**\") pass def", "cv2 image Returns: Image: PIL image \"\"\" new_image = image.copy() if new_image.ndim ==", "string, threshold] \"\"\" st.sidebar.markdown(\"# Model Config\") num_thread = st.sidebar.slider(\"Number of Thread\", 1, 4,", "\"\"\"show description Args: header (str): header message description (str): description text \"\"\" st.subheader(header)", "PIL import Image from utils.model import MODEL_TYPE, draw_bboxes def description(header: str, description: str):", "= st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"JPG\"]) if uploaded_file is not None: file_bytes =", "1, 4, 1, 1) confidence_threshold = st.sidebar.slider( \"Confidence threshold\", 0.0, 1.0, 0.5, 0.01)", "\"\"\" uploaded_file = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"JPG\"]) if uploaded_file is not None:", "\"\"\" new_image = image.copy() if new_image.ndim == 2: # モノクロ pass elif new_image.shape[2]", "new_image = image.copy() if new_image.ndim == 2: # モノクロ pass elif new_image.shape[2] ==", "image \"\"\" uploaded_file = st.file_uploader(\"Choose an image...\", type=[\"jpg\", \"JPG\"]) if uploaded_file is not", "import cv2 import numpy as np import streamlit as st from PIL import", "num_thread = st.sidebar.slider(\"Number of Thread\", 1, 4, 1, 1) confidence_threshold = st.sidebar.slider( \"Confidence", "classes: List, detect_num: int, elapsed_time: int): \"\"\"show processed image. 
Args: image (np.ndarray): original", "0.5, 0.01) model_type = st.sidebar.radio(\"Model Type\", MODEL_TYPE) return num_thread, model_type, confidence_threshold def upload_image()", "return None def show_image(image: np.ndarray, bboxes: List, scores: List, classes: List, detect_num: int,", "of Thread\", 1, 4, 1, 1) confidence_threshold = st.sidebar.slider( \"Confidence threshold\", 0.0, 1.0,", "if new_image.ndim == 2: # モノクロ pass elif new_image.shape[2] == 3: # カラー", "draw_bboxes def description(header: str, description: str): \"\"\"show description Args: header (str): header message", "file_bytes = np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image = cv2.imdecode(file_bytes, 1) return image else: return", "np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image = cv2.imdecode(file_bytes, 1) return image else: return None def", "(int): processing time \"\"\" image = draw_bboxes(image, bboxes, scores, classes, detect_num) image =", "use_column_width=True) st.markdown(\"**elapsed time : \" + str(elapsed_time) + \"[msec]**\") pass def cv2pil(image: np.ndarray)", "\"[msec]**\") pass def cv2pil(image: np.ndarray) -> Image: \"\"\"cv2 image to PIL image Args:", "object detector ui in sidebar Returns: Tuple[int, str, float]: [number of threads, model", "カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2] == 4: # 透過 new_image =", "header (str): header message description (str): description text \"\"\" st.subheader(header) st.markdown(description) def object_detector_ui()", "Optional[np.ndarray]: \"\"\"show upload image area Returns: Optional[np.ndarray]: uploaded image \"\"\" uploaded_file = st.file_uploader(\"Choose", "== 2: # モノクロ pass elif new_image.shape[2] == 3: # カラー new_image =", "PIL image Args: image (np.ndarray): cv2 image Returns: Image: PIL image \"\"\" new_image", "dtype=np.uint8) image = cv2.imdecode(file_bytes, 1) return image else: return None def show_image(image: np.ndarray,", "st.sidebar.markdown(\"# Model Config\") num_thread = st.sidebar.slider(\"Number of Thread\", 1, 4, 1, 1) confidence_threshold", "\"\"\"cv2 image to PIL image Args: image (np.ndarray): cv2 image Returns: Image: PIL", "= cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif new_image.shape[2] == 4: # 透過 new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA)", "image = draw_bboxes(image, bboxes, scores, classes, detect_num) image = cv2pil(image) st.image(image, caption='Uploaded Image.',", ": \" + str(elapsed_time) + \"[msec]**\") pass def cv2pil(image: np.ndarray) -> Image: \"\"\"cv2", "Image from utils.model import MODEL_TYPE, draw_bboxes def description(header: str, description: str): \"\"\"show description", "numpy as np import streamlit as st from PIL import Image from utils.model", "num_thread, model_type, confidence_threshold def upload_image() -> Optional[np.ndarray]: \"\"\"show upload image area Returns: Optional[np.ndarray]:", "None def show_image(image: np.ndarray, bboxes: List, scores: List, classes: List, detect_num: int, elapsed_time:", "header message description (str): description text \"\"\" st.subheader(header) st.markdown(description) def object_detector_ui() -> Tuple[int,", "coding:utf-8 -*- from typing import Optional, Tuple, List import cv2 import numpy as", "cv2pil(image) st.image(image, caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed time : \" + str(elapsed_time) + \"[msec]**\")", "box scores (List): detected score classes (List): detected class names detect_num (int): number", "モノクロ pass elif 
new_image.shape[2] == 3: # カラー new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB) elif", "\"JPG\"]) if uploaded_file is not None: file_bytes = np.asarray( bytearray(uploaded_file.read()), dtype=np.uint8) image =", "= st.sidebar.slider( \"Confidence threshold\", 0.0, 1.0, 0.5, 0.01) model_type = st.sidebar.radio(\"Model Type\", MODEL_TYPE)", "Image: PIL image \"\"\" new_image = image.copy() if new_image.ndim == 2: # モノクロ", "st.image(image, caption='Uploaded Image.', use_column_width=True) st.markdown(\"**elapsed time : \" + str(elapsed_time) + \"[msec]**\") pass", "cv2.COLOR_BGR2RGB) elif new_image.shape[2] == 4: # 透過 new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA) new_image =", "message description (str): description text \"\"\" st.subheader(header) st.markdown(description) def object_detector_ui() -> Tuple[int, str,", "def show_image(image: np.ndarray, bboxes: List, scores: List, classes: List, detect_num: int, elapsed_time: int):", "List, classes: List, detect_num: int, elapsed_time: int): \"\"\"show processed image. Args: image (np.ndarray):", "confidence_threshold def upload_image() -> Optional[np.ndarray]: \"\"\"show upload image area Returns: Optional[np.ndarray]: uploaded image", "elif new_image.shape[2] == 4: # 透過 new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA) new_image = Image.fromarray(new_image)", "import MODEL_TYPE, draw_bboxes def description(header: str, description: str): \"\"\"show description Args: header (str):", "streamlit as st from PIL import Image from utils.model import MODEL_TYPE, draw_bboxes def", "\"\"\" st.subheader(header) st.markdown(description) def object_detector_ui() -> Tuple[int, str, float]: \"\"\"show object detector ui", "time : \" + str(elapsed_time) + \"[msec]**\") pass def cv2pil(image: np.ndarray) -> Image:", "number of detection elapsed_time (int): processing time \"\"\" image = draw_bboxes(image, bboxes, scores,", "1) confidence_threshold = st.sidebar.slider( \"Confidence threshold\", 0.0, 1.0, 0.5, 0.01) model_type = st.sidebar.radio(\"Model", "image \"\"\" new_image = image.copy() if new_image.ndim == 2: # モノクロ pass elif", "st.markdown(description) def object_detector_ui() -> Tuple[int, str, float]: \"\"\"show object detector ui in sidebar", "sidebar Returns: Tuple[int, str, float]: [number of threads, model type string, threshold] \"\"\"", "(List): detected score classes (List): detected class names detect_num (int): number of detection", "image Args: image (np.ndarray): cv2 image Returns: Image: PIL image \"\"\" new_image =", "str): \"\"\"show description Args: header (str): header message description (str): description text \"\"\"", "+ str(elapsed_time) + \"[msec]**\") pass def cv2pil(image: np.ndarray) -> Image: \"\"\"cv2 image to", "class names detect_num (int): number of detection elapsed_time (int): processing time \"\"\" image", "names detect_num (int): number of detection elapsed_time (int): processing time \"\"\" image =", "from utils.model import MODEL_TYPE, draw_bboxes def description(header: str, description: str): \"\"\"show description Args:", "0.01) model_type = st.sidebar.radio(\"Model Type\", MODEL_TYPE) return num_thread, model_type, confidence_threshold def upload_image() ->", "st from PIL import Image from utils.model import MODEL_TYPE, draw_bboxes def description(header: str,", "description Args: header (str): header message description (str): description text \"\"\" st.subheader(header) st.markdown(description)" ]
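# Illustrative sketch (not part of the original file) of how the helpers
# above could be wired together into a Streamlit page. `_run_detection` is
# a hypothetical stand-in for the real detector in utils.model, whose
# exact API is not shown in this file.
import time as _time


def _run_detection(image, model_type, num_thread, threshold):
    # Hypothetical detector stub: returns (bboxes, scores, classes, detect_num).
    return [], [], [], 0


def main():
    description("Object Detection Demo", "Upload a JPEG image and run the detector.")
    num_thread, model_type, threshold = object_detector_ui()
    image = upload_image()
    if image is not None:
        start = _time.perf_counter()
        bboxes, scores, classes, detect_num = _run_detection(
            image, model_type, num_thread, threshold)
        elapsed_ms = int((_time.perf_counter() - start) * 1000)
        show_image(image, bboxes, scores, classes, detect_num, elapsed_ms)


if __name__ == "__main__":
    main()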
[ "in matchlist: yield { 'rank' : match[0], 'src' : match[1], 'name' : match[2],", "request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8') except error as e: return e.reason def parse_(html): ol", "req = request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8') except error as e: return e.reason def", "request.urlopen(req).read().decode('utf-8') except error as e: return e.reason def parse_(html): ol = re.search('<ol class=\"grid_view\">(.*?)</ol>',", "from fake_useragent import UserAgent import re import time def request_(url): try: ua =", "match[3] } def main(): url = 'https://movie.douban.com/top250?start={}' for page in range(10): start =", "from urllib import request, error from fake_useragent import UserAgent import re import time", "headers = {'User-Agent': ua.chrome} req = request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8') except error as", "import UserAgent import re import time def request_(url): try: ua = UserAgent() headers", "'score' : match[3] } def main(): url = 'https://movie.douban.com/top250?start={}' for page in range(10):", "= re.compile(content, re.S).findall(ol) for match in matchlist: yield { 'rank' : match[0], 'src'", "ua.chrome} req = request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8') except error as e: return e.reason", "ua = UserAgent() headers = {'User-Agent': ua.chrome} req = request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8')", "= UserAgent() headers = {'User-Agent': ua.chrome} req = request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8') except", "import request, error from fake_useragent import UserAgent import re import time def request_(url):", "re.S).group(0) content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' + 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content, re.S).findall(ol) for match", "return e.reason def parse_(html): ol = re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content = ('<li>.*?<em", "} def main(): url = 'https://movie.douban.com/top250?start={}' for page in range(10): start = page*25", "for match in matchlist: yield { 'rank' : match[0], 'src' : match[1], 'name'", "def main(): url = 'https://movie.douban.com/top250?start={}' for page in range(10): start = page*25 html", "'rank' : match[0], 'src' : match[1], 'name' : match[2], 'score' : match[3] }", "+ 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content, re.S).findall(ol) for match in matchlist: yield { 'rank'", "class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' + 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content, re.S).findall(ol)", "return request.urlopen(req).read().decode('utf-8') except error as e: return e.reason def parse_(html): ol = re.search('<ol", "('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' 
+ 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content, re.S).findall(ol) for match in matchlist: yield", "time def request_(url): try: ua = UserAgent() headers = {'User-Agent': ua.chrome} req =", "e.reason def parse_(html): ol = re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?'", "re.S).findall(ol) for match in matchlist: yield { 'rank' : match[0], 'src' : match[1],", "match[2], 'score' : match[3] } def main(): url = 'https://movie.douban.com/top250?start={}' for page in", "match[0], 'src' : match[1], 'name' : match[2], 'score' : match[3] } def main():", "start = page*25 html = request_(url.format(start)) time.sleep(0.5) for match in parse_(html): print(match) if", "e: return e.reason def parse_(html): ol = re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content =", "yield { 'rank' : match[0], 'src' : match[1], 'name' : match[2], 'score' :", "'src' : match[1], 'name' : match[2], 'score' : match[3] } def main(): url", "re import time def request_(url): try: ua = UserAgent() headers = {'User-Agent': ua.chrome}", "'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content, re.S).findall(ol) for match in matchlist: yield { 'rank' :", "fake_useragent import UserAgent import re import time def request_(url): try: ua = UserAgent()", "= re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' + 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist =", "= {'User-Agent': ua.chrome} req = request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8') except error as e:", "import re import time def request_(url): try: ua = UserAgent() headers = {'User-Agent':", "= 'https://movie.douban.com/top250?start={}' for page in range(10): start = page*25 html = request_(url.format(start)) time.sleep(0.5)", "UserAgent() headers = {'User-Agent': ua.chrome} req = request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8') except error", "error from fake_useragent import UserAgent import re import time def request_(url): try: ua", "page in range(10): start = page*25 html = request_(url.format(start)) time.sleep(0.5) for match in", "{ 'rank' : match[0], 'src' : match[1], 'name' : match[2], 'score' : match[3]", "content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' 
+ 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content, re.S).findall(ol) for match in", "= request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8') except error as e: return e.reason def parse_(html):", "import time def request_(url): try: ua = UserAgent() headers = {'User-Agent': ua.chrome} req", "{'User-Agent': ua.chrome} req = request.Request(url, headers=headers) return request.urlopen(req).read().decode('utf-8') except error as e: return", "match in matchlist: yield { 'rank' : match[0], 'src' : match[1], 'name' :", "= page*25 html = request_(url.format(start)) time.sleep(0.5) for match in parse_(html): print(match) if __name__", "def request_(url): try: ua = UserAgent() headers = {'User-Agent': ua.chrome} req = request.Request(url,", ": match[1], 'name' : match[2], 'score' : match[3] } def main(): url =", "class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' + 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content, re.S).findall(ol) for match in matchlist: yield {", "matchlist = re.compile(content, re.S).findall(ol) for match in matchlist: yield { 'rank' : match[0],", ": match[2], 'score' : match[3] } def main(): url = 'https://movie.douban.com/top250?start={}' for page", ": match[0], 'src' : match[1], 'name' : match[2], 'score' : match[3] } def", "page*25 html = request_(url.format(start)) time.sleep(0.5) for match in parse_(html): print(match) if __name__ ==", "error as e: return e.reason def parse_(html): ol = re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0)", "main(): url = 'https://movie.douban.com/top250?start={}' for page in range(10): start = page*25 html =", "html, re.S).group(0) content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' + 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content, re.S).findall(ol) for", "for page in range(10): start = page*25 html = request_(url.format(start)) time.sleep(0.5) for match", "except error as e: return e.reason def parse_(html): ol = re.search('<ol class=\"grid_view\">(.*?)</ol>', html,", "= request_(url.format(start)) time.sleep(0.5) for match in parse_(html): print(match) if __name__ == '__main__': main()", "as e: return e.reason def parse_(html): ol = re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content", "re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' + 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content,", "ol = re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' 
+ 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist", "urllib import request, error from fake_useragent import UserAgent import re import time def", "re.compile(content, re.S).findall(ol) for match in matchlist: yield { 'rank' : match[0], 'src' :", "in range(10): start = page*25 html = request_(url.format(start)) time.sleep(0.5) for match in parse_(html):", "try: ua = UserAgent() headers = {'User-Agent': ua.chrome} req = request.Request(url, headers=headers) return", "'https://movie.douban.com/top250?start={}' for page in range(10): start = page*25 html = request_(url.format(start)) time.sleep(0.5) for", "url = 'https://movie.douban.com/top250?start={}' for page in range(10): start = page*25 html = request_(url.format(start))", "UserAgent import re import time def request_(url): try: ua = UserAgent() headers =", "html = request_(url.format(start)) time.sleep(0.5) for match in parse_(html): print(match) if __name__ == '__main__':", "'name' : match[2], 'score' : match[3] } def main(): url = 'https://movie.douban.com/top250?start={}' for", "parse_(html): ol = re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' + 'property=\"v:average\">(.*?)</span>.*?</li>')", ": match[3] } def main(): url = 'https://movie.douban.com/top250?start={}' for page in range(10): start", "request_(url): try: ua = UserAgent() headers = {'User-Agent': ua.chrome} req = request.Request(url, headers=headers)", "range(10): start = page*25 html = request_(url.format(start)) time.sleep(0.5) for match in parse_(html): print(match)", "def parse_(html): ol = re.search('<ol class=\"grid_view\">(.*?)</ol>', html, re.S).group(0) content = ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' +", "match[1], 'name' : match[2], 'score' : match[3] } def main(): url = 'https://movie.douban.com/top250?start={}'", "headers=headers) return request.urlopen(req).read().decode('utf-8') except error as e: return e.reason def parse_(html): ol =", "request, error from fake_useragent import UserAgent import re import time def request_(url): try:", "matchlist: yield { 'rank' : match[0], 'src' : match[1], 'name' : match[2], 'score'", "= ('<li>.*?<em class=\"\">(\\d+)</em>.*?class=\"hd\".*?href=\"(.*?)\".*?class=\"title\">(.*?)</span>.*?' + 'property=\"v:average\">(.*?)</span>.*?</li>') matchlist = re.compile(content, re.S).findall(ol) for match in matchlist:" ]
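# Optional usage sketch: persist the scraped rows with the stdlib csv
# module. The output filename and field order are illustrative choices,
# not taken from the original script.
import csv


def save_csv(rows, path='douban_top250.csv'):
    with open(path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=['rank', 'name', 'score', 'src'])
        writer.writeheader()
        for row in rows:
            writer.writerow(row)

# e.g. save_csv(parse_(request_('https://movie.douban.com/top250?start=0')))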
[ "<gh_stars>0 import os import sys import uuid import zipfile def extract(source, destination): z", "os import sys import uuid import zipfile def extract(source, destination): z = zipfile.ZipFile(source)", "import os import sys import uuid import zipfile def extract(source, destination): z =", "destination): z = zipfile.ZipFile(source) for f in z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")): with open(os.path.join(destination,", "zipfile.ZipFile(source) for f in z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")): with open(os.path.join(destination, \"{}.jpg\".format(str(uuid.uuid4()))), \"wb\") as", "z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")): with open(os.path.join(destination, \"{}.jpg\".format(str(uuid.uuid4()))), \"wb\") as outfile: outfile.write(z.read(f)) extract(\"/media/val/Sources/OnSource/servicerequests/servicerequests-20170223T162149Z-004.zip\", \"/media/val/SSD/temp\")", "sys import uuid import zipfile def extract(source, destination): z = zipfile.ZipFile(source) for f", "def extract(source, destination): z = zipfile.ZipFile(source) for f in z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")):", "f in z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")): with open(os.path.join(destination, \"{}.jpg\".format(str(uuid.uuid4()))), \"wb\") as outfile: outfile.write(z.read(f))", "= zipfile.ZipFile(source) for f in z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")): with open(os.path.join(destination, \"{}.jpg\".format(str(uuid.uuid4()))), \"wb\")", "z = zipfile.ZipFile(source) for f in z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")): with open(os.path.join(destination, \"{}.jpg\".format(str(uuid.uuid4()))),", "import zipfile def extract(source, destination): z = zipfile.ZipFile(source) for f in z.namelist(): if(f.upper().endswith(\".JPG\")", "import sys import uuid import zipfile def extract(source, destination): z = zipfile.ZipFile(source) for", "extract(source, destination): z = zipfile.ZipFile(source) for f in z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")): with", "in z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")): with open(os.path.join(destination, \"{}.jpg\".format(str(uuid.uuid4()))), \"wb\") as outfile: outfile.write(z.read(f)) extract(\"/media/val/Sources/OnSource/servicerequests/servicerequests-20170223T162149Z-004.zip\",", "for f in z.namelist(): if(f.upper().endswith(\".JPG\") or f.upper().endswith(\".JPEG\")): with open(os.path.join(destination, \"{}.jpg\".format(str(uuid.uuid4()))), \"wb\") as outfile:", "uuid import zipfile def extract(source, destination): z = zipfile.ZipFile(source) for f in z.namelist():", "zipfile def extract(source, destination): z = zipfile.ZipFile(source) for f in z.namelist(): if(f.upper().endswith(\".JPG\") or", "import uuid import zipfile def extract(source, destination): z = zipfile.ZipFile(source) for f in" ]
[ "after finalizing meta_attr def _get_field_name(n: str) -> str: if n in meta_attrs['_field_defs_']: return", "= () exclude_values_up = {'':()} exclude_values_down = {'':()} #internal _field_defs_: Dict[str, Field] _fields_:", "model could look like this: ```python class User(Base): name = Field('varchar(65)') email =", "type ({type(arg)}) to Model __init__ method. Expected: dictionary or keyword argument\") for k,v", "fields = self.Meta._fields_ if k in fields: fields[k].delete_value() else: super().__delattr__(k) def __getattr__(self, k):", "from spelling mistake f.profession: 'Teacher', # safe from spelling mistake 'hobby': 'Gardenning', #", "trying to set an invalid value: {v}') def __repr__(self): reprs = [] for", "v is Void: return False if k in exclude_values: if v in exclude_values[k]:", "pass async def _pre_update_(self, db): \"\"\"Pre-update hook. Override to run pre update cleanup.", "class itself. parents = tuple(b for b in bases if isinstance(b, ModelType)) if", "'{k}' in class '{class_name}.Meta'. Required {required_type}.\") meta_attrs[k] = given_value except AttributeError: if inherit:", "{} # dict is ordered, officially from python 3.7 for n, v in", "needs to be defined here, not in meta.Meta # meta.Meta is used in", "def __delattr__(self, k): raise NotImplementedError(\"You can not delete model attributes outside model definition.\")", "save cleanup. Args: db (DB): db handle. \"\"\" pass async def _pre_delete_(self, db):", "== 'Meta': raise AttributeError(f\"Name '{k} is reserved. You should not try to change", "inherit=False) new_attrs = {} # dict is ordered, officially from python 3.7 for", "v) def _get_all_fields_(self) -> Dict[str, Field]: \"\"\"Get all fields on model without applying", "if k in fields: v = fields[k].value if self.__class__._is_valid_down_(k, v): return v raise", "k in fields: v = fields[k].value if self.__class__._is_valid_down_(k, v): return v raise AttributeError(f'Invalid", "class. Or it does not have any valid value.') raise AttributeError def __setattr__(self,", "super().__setattr__(k, v) if self.__class__._is_valid_up_(k, v): if k in self.Meta._fromdb_: fields[k]._ignore_first_change_count_ = True self.Meta._fromdb_.remove(k)", "TypeError(f\"Invalid argument type ({type(arg)}) to Model __init__ method. Expected: dictionary or keyword argument\")", "'<NAME>', 'profession': 'Teacher', 'active': True}, age=34) ``` Raises: TypeError: If invalid type of", "def _get_db_table_(self) -> str: \"\"\"Get db table name for model \"\"\" return self.Meta.db_table", "() exclude_values_up = {'':()} exclude_values_down = {'':()} #internal _field_defs_: Dict[str, Field] _fields_: Dict[str,", "Field('varchar(255)', default='Unknown') random = Field('integer', default=get_rand) # function can be default ``` ##", "no attribute \"Meta\" def __new__(mcs, class_name: str, bases: tuple, attrs: dict): # Ensure", "an abstract model or not \"\"\" return self.Meta.abstract def _is_proxy_(self) -> bool: \"\"\"Whether", "Any) -> bool: \"\"\"Check whether the key and value is valid for down", "(data retrieval). Defaults to False. Yields: Iterator[Tuple[str, Any]]: Yields key, value pair \"\"\"", "List[str] def __init__(self, *args, **kwargs): class Meta: _fields_: Dict[str, FieldValue] = {} _fromdb_:", "in arg_items: setattr(self, k, v) for k,v in kwargs.items(): setattr(self, k, v) def", "self.__dict__['func'] = func def __getattr__(self, k): return self.__dict__['func'](k) def __setattr__(self, k, v): raise", "or down (data retrieval). Defaults to False. 
Yields: Iterator[Tuple[str, Any]]: Yields key, value", "\"\"\"Whether it's an abstract model or not \"\"\" return self.Meta.abstract def _is_proxy_(self) ->", "__delattr__(self, k): fields = self.Meta._fields_ if k in fields: fields[k].delete_value() else: super().__delattr__(k) def", "to run pre save cleanup. Args: db (DB): db handle. \"\"\" pass async", "# default is False proxy = False # default is False # ...", "in data.items(): # if not self._is_valid_value_(k, v, exclude_values): # continue # if gen:", "attrs[n] # we do this after finalizing meta_attr def _get_field_name(n: str) -> str:", "v, self.Meta.exclude_values_up) def _is_valid_down_value_(self, k: str, v: Any) -> bool: \"\"\"Returns True if", "meta_attrs['db_table'] # Field must not contain table_name, because it is void when model", "not self._is_valid_key_(k, fields, exclude_keys): continue yield k def _get_FieldValue_data_valid_(self, data: dict, up=False) ->", "a proxy model. It does not have a valid base or super base", "v: Any) -> bool: \"\"\"Returns True if the value for the key is", "must not be imported here. Meta = mt.Meta # For client use class", "= self.Meta.fields_down exclude_fields = self.Meta.exclude_fields_down # new_data = type(data)() for k,v in data.items():", "Example: ```python User(name='<NAME>', profession='Teacher') User({'name': '<NAME>', 'profession': 'Teacher'}) User({'name': '<NAME>', 'profession': 'Teacher'}, age=34)", "setattr(self, k, v) for k,v in kwargs.items(): setattr(self, k, v) def __iter__(self): \"\"\"Iter", "attribute \"Meta\" def __new__(mcs, class_name: str, bases: tuple, attrs: dict): # Ensure initialization", "Iterator[Tuple[str, Any]]: Yields key, value pair \"\"\" if up: exclude_values = self.Meta.exclude_values_up fields", "by other class meta change if mutable: meta_attrs[k] = copy.deepcopy(v) else: meta_attrs[k] =", "# for k,v in data.items(): # if not self._is_valid_value_(k, v, exclude_values): # continue", "v: Any) -> bool: \"\"\"Check whether the key and value is valid for", "self._is_valid_key_(k, self.Meta.fields_up, self.Meta.exclude_fields_up) def _is_valid_value_(self, k: str, v: Any, exclude_values: Dict[str, Tuple[Any]]) ->", "continue # if gen: # yield k, v # else: # new_data[k] =", "in {self.__class__.__name__} Meta class. Or it does not have any valid value.') raise", "dict is ordered, officially from python 3.7 for n, v in _class_.__dict__.items(): if", "considering include/exclude down keys \"\"\" return self._is_valid_key_(k, self.Meta.fields_down, self.Meta.exclude_fields_down) def _is_valid_up_key_(self, k: str)", "v return fields = self.Meta._fields_ if k not in fields: raise AttributeError(f\"No such", "= self.Meta._fields_ if k not in fields: raise AttributeError(f\"No such field ('{k}') in", "exists else raise AttributeError Args: n (str): field name Raises: AttributeError: if field", "'Copyright © <NAME> <https://github.com/neurobin/>' __license__ = '[BSD](http://www.opensource.org/licenses/bsd-license.php)' __version__ = '0.0.1' import inspect import", "= Field('integer', default=get_rand) # function can be default ``` ## Initialize a model", "given_value = getattr(meta, k) if internal: raise ValueError(f\"'{k}' is a reserved attribute for", "= Meta for k, v in self.__class__.Meta._field_defs_.items(): self.Meta._fields_[k] = FieldValue(v) for arg in", "bases, attrs) classcell = attrs.pop('__classcell__', None) class _Meta_(mt.Meta): pass meta = attrs.pop('Meta', _Meta_)", "model '{class_name}'. 
\\ Field name must not start with underscore.\") if meta_attrs['proxy'] and", "not self._is_valid_value_(k, v.value, exclude_values): continue yield k, v # def _get_data_for_valid_values_(self, data, up=False,", "in fields: raise AttributeError(f\"No such field ('{k}') in model '{self.__class__.__name__}''\") # v =", "if k.endswith('_'): raise AttributeError('_<name>_ such names are reserved for predefined methods.') self.__dict__[k] =", "from morm.fields.field import Field, FieldValue from morm.types import Void import morm.meta as mt", "it is defaulted to 'id' abstract = True # postgresql example id =", "f = User.Meta.f my_data = { f.name: '<NAME>', # safe from spelling mistake", "= n # v.sql_conf.conf['table_name'] = meta_attrs['db_table'] # Field must not contain table_name, because", "else: meta_attrs[k] = v _set_meta_attr('proxy', False) _set_meta_attr('pk', 'id') _set_meta_attr('ordering', ()) _set_meta_attr('fields_up', ()) _set_meta_attr('fields_down',", "tried to set. \"\"\" class Meta: \"\"\"Meta that holds metadata for model \"\"\"", "holds metadata for model \"\"\" # The following needs to be defined here,", "Iterator[Tuple[str, Any]]: \"\"\"Yields valid key,value pairs from data. Validity is checked against include/exclude", "Dict[str, FieldValue] = {} _fromdb_: List[str] = [] # super(ModelBase, self).__setattr__('Meta', Meta) self.__dict__['Meta']", "key/value criteria. Args: data (dict): data to be validated. up (bool, optional): whether", "\"\"\" return self.Meta.pk def _get_ordering_(self, quote: str) -> Iterator[Tuple[str, str]]: \"\"\"Yield each ordering", "change if mutable: meta_attrs[k] = copy.deepcopy(v) else: meta_attrs[k] = v _set_meta_attr('proxy', False) _set_meta_attr('pk',", "delete cleanup. Args: db (DB): db handle. \"\"\" pass async def _post_save_(self, db):", "updated_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', value=timestamp) ``` Then a minimal", "is excluded using either exclude_fields_up or exclude_values_up in {self.__class__.__name__} Meta class. Or you", "or not \"\"\" return self.Meta.abstract def _is_proxy_(self) -> bool: \"\"\"Whether it is a", "you are trying to set an invalid value: {v}') def __repr__(self): reprs =", "proxy model. It does not have a valid base or super base non-proxy", "for o in ordering: if o.startswith('-'): direction = 'DESC' o = o[1:] elif", "run pre delete cleanup. Args: db (DB): db handle. \"\"\" pass async def", "k, v where k is field name and v is field value Yields:", "type: ignore from morm.exceptions import ItemDoesNotExistError from morm.fields.field import Field, FieldValue from morm.types", "fields = self.Meta.fields_up exclude_keys = self.Meta.exclude_fields_up else: fields = self.Meta.fields_down exclude_keys = self.Meta.exclude_fields_down", "excluding Model class itself. parents = tuple(b for b in bases if isinstance(b,", "pair \"\"\" if up: exclude_values = self.Meta.exclude_values_up fields = self.Meta.fields_up exclude_fields = self.Meta.exclude_fields_up", "(data update) or down (data retrieval). Defaults to False. 
Yields: Iterator[Tuple[str, Any]]: Yields", "(str): Quote to apply to the column Yields: Iterator[Tuple[str, str]]: Yields column, direction", "= v return fields = self.Meta._fields_ if k not in fields: raise AttributeError(f\"No", "key is valid considering include/exclude keys \"\"\" if k in exclude_keys: return False", "BaseMeta.abstract except AttributeError: raise TypeError(f\"This model '{class_name}' can not be a proxy model.", "it is void when model is abstract and it gets inherited. meta_attrs['_field_defs_'][n] =", "``` Raises: TypeError: If invalid type of argument is provided. ## Special Model", "# else: # exclude_values = self.Meta.exclude_values_down # new_data = type(data)() # for k,v", "*args, **kwargs): class Meta: _fields_: Dict[str, FieldValue] = {} _fromdb_: List[str] = []", "return new_data def _get_db_table_(self) -> str: \"\"\"Get db table name for model \"\"\"", "be validated. up (bool, optional): whether up (data update) or down (data retrieval).", "o[1:] elif o.startswith('+'): o = o[1:] o = f\"{quote}{o}{quote}\" yield o, direction class", "return random.randint(1, 9) class User(Base): class Meta: db_table = 'myapp_user' abstract = False", "= getattr(BaseMeta, k, v) # mutable values can be changed by other class", "for predefined methods.') self.__dict__[k] = v return fields = self.Meta._fields_ if k not", "False # default is False proxy = False # default is False #", "if '' in exclude_values and v in exclude_values['']: return False return True def", "exclude values \"\"\" if v is Void: return False if k in exclude_values:", "corresponding fields according to the keys. Positional arguments must be dictionaries of keys", "= Field('varchar(65)') email = Field('varchar(255)') password = Field('varchar(255)') ``` An advanced model could", "yield k, v # else: # new_data[k] = v # if not gen:", "mypy error: \"ModelType\" has no attribute \"Meta\" def __new__(mcs, class_name: str, bases: tuple,", "not define new field: {n}\") v.name = n # v.sql_conf.conf['table_name'] = meta_attrs['db_table'] #", "`DESC` Args: quote (str): Quote to apply to the column Yields: Iterator[Tuple[str, str]]:", "is not None: new_attrs['__classcell__'] = classcell return super().__new__(mcs, class_name, bases, new_attrs) def __setattr__(self,", "attrs.pop('__classcell__', None) class _Meta_(mt.Meta): pass meta = attrs.pop('Meta', _Meta_) if not inspect.isclass(meta): #TEST:", "self.Meta.exclude_fields_down all_fields = self._get_all_fields_() for k in all_fields: if not self._is_valid_key_(k, fields, exclude_keys):", "name and v is field value Yields: tuple: field_name, field_value \"\"\" for k,", "\"\"\"Get all fields on model without applying any restriction. Returns: Dict[str, Field]: Dictionary", "_Meta_(mt.Meta): pass meta = attrs.pop('Meta', _Meta_) if not inspect.isclass(meta): #TEST: Meta is restricted", "v = fields[k].clean(v) # super().__setattr__(k, v) if self.__class__._is_valid_up_(k, v): if k in self.Meta._fromdb_:", "ValueError(f\"'{k}' is a reserved attribute for class Meta. Error in model '{class_name}'\") given_type", "in self.Meta._fields_.items(): if self.__class__._is_valid_down_(k, f.value): yield k, f.value def __delattr__(self, k): fields =", "User(Base): class Meta: db_table = 'myapp_user' abstract = False # default is False", "_pre_insert_(self, db): \"\"\"Pre-insert hook. Override to run pre insert cleanup. 
Args: db (DB):", "v elif k in self.Meta._fromdb_: self.Meta._fromdb_.remove(k) else: raise AttributeError(f'Can not set field `{k}`.", "key is valid considering exclude up values \"\"\" return self._is_valid_value_(k, v, self.Meta.exclude_values_up) def", "key,value pairs from data. Validity is checked against include/exclude key/value criteria. Args: data", "in self.Meta._fromdb_: self.Meta._fromdb_.remove(k) else: raise AttributeError(f'Can not set field `{k}`. It is excluded", "def _is_valid_value_(self, k: str, v: Any, exclude_values: Dict[str, Tuple[Any]]) -> bool: \"\"\"Returns True", "ZONE', sql_onadd='NOT NULL', sql_alter=('ALTER TABLE \"{table}\" ALTER COLUMN \"{column}\" SET DEFAULT NOW()',)) updated_at", "if meta_attrs['proxy']: #proxy model inherits everything try: meta_attrs['db_table'] = BaseMeta.db_table meta_attrs['abstract'] = BaseMeta.abstract", "v): raise NotImplementedError(\"You can not set model attributes outside model definition.\") def __delattr__(self,", "Meta = mt.Meta # For client use class _FieldNames(): \"\"\"Access field names \"\"\"", "not set model attributes outside model definition.\") def __delattr__(self, k): raise NotImplementedError(\"You can", "self).__setattr__('Meta', Meta) self.__dict__['Meta'] = Meta for k, v in self.__class__.Meta._field_defs_.items(): self.Meta._fields_[k] = FieldValue(v)", "NULL', sql_alter=('ALTER TABLE \"{table}\" ALTER COLUMN \"{column}\" SET DEFAULT NOW()',)) updated_at = Field('TIMESTAMP", "str) -> Iterator[Tuple[str, str]]: \"\"\"Yield each ordering from model parsed and converted to", "parents = tuple(b for b in bases if isinstance(b, ModelType)) if not parents:", "required_type = type(v) if not given_type is required_type: raise TypeError(f\"Invalid type {given_type} given", "field names \"\"\" def __init__(self, func): self.__dict__['func'] = func def __getattr__(self, k): return", "self.Meta.exclude_values_up # else: # exclude_values = self.Meta.exclude_values_down # new_data = type(data)() # for", "\"\"\" pass async def _pre_insert_(self, db): \"\"\"Pre-insert hook. Override to run pre insert", "to run post delete cleanup. Args: db (DB): db handle. \"\"\" pass async", "import copy from abc import ABCMeta from asyncpg import Record # type: ignore", "new_attrs) def __setattr__(self, k, v): raise NotImplementedError(\"You can not set model attributes outside", "Special Model Meta attribute `f`: You can access field names from `ModelClass.Meta.f`. This", "for k, f in self.Meta._fields_.items(): if self.__class__._is_valid_down_(k, f.value): yield k, f.value def __delattr__(self,", "True def _is_valid_down_key_(self, k: str) -> bool: \"\"\"Returns True if the key is", "field name \"\"\" if up: fields = self.Meta.fields_up exclude_keys = self.Meta.exclude_fields_up else: fields", "random def get_rand(): return random.randint(1, 9) class User(Base): class Meta: db_table = 'myapp_user'", "\"{column}\" SET DEFAULT NOW()',)) updated_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', value=timestamp)", "table_name, because it is void when model is abstract and it gets inherited.", "\"\"\" return self.Meta.proxy def _get_pk_(self) -> str: \"\"\"Get primary column name \"\"\" return", "= attrs.pop('Meta', _Meta_) if not inspect.isclass(meta): #TEST: Meta is restricted as a class", "hook. Override to run post insert cleanup. Args: db (DB): db handle. 
\"\"\"", "-> Iterator[str]: \"\"\"Yields field names that pass include/exclude criteria Args: up (bool, optional):", "exclude_values['']: return False return True def _is_valid_up_value_(self, k: str, v: Any) -> bool:", "# super(ModelBase, self).__setattr__('Meta', Meta) self.__dict__['Meta'] = Meta for k, v in self.__class__.Meta._field_defs_.items(): self.Meta._fields_[k]", "self.Meta._fields_ if k not in fields: raise AttributeError(f\"No such field ('{k}') in model", "pk = 'id' '''Primary key''' db_table = Void abstract = True proxy =", "this: ```python class User(Base): name = Field('varchar(65)') email = Field('varchar(255)') password = Field('varchar(255)')", "class _FieldNames(): \"\"\"Access field names \"\"\" def __init__(self, func): self.__dict__['func'] = func def", "'' in exclude_values and v in exclude_values['']: return False return True def _is_valid_up_value_(self,", "retrieval) \"\"\" return self._is_valid_down_key_(k) and self._is_valid_down_value_(k, v) def _is_valid_up_(self, k: str, v: Any)", "email = Field('varchar(255)') password = Field('varchar(255)') ``` An advanced model could look like:", "= {} # dict is ordered, officially from python 3.7 for n, v", "valid considering exclude up values \"\"\" return self._is_valid_value_(k, v, self.Meta.exclude_values_up) def _is_valid_down_value_(self, k:", "raise AttributeError Args: n (str): field name Raises: AttributeError: if field name does", "or not \"\"\" return self.Meta.proxy def _get_pk_(self) -> str: \"\"\"Get primary column name", "that pass include/exclude criteria Args: up (bool, optional): up criteria or down criteria.", "value is valid for up (data update) \"\"\" return self._is_valid_up_key_(k) and self._is_valid_up_value_(k, v)", "import ABCMeta from asyncpg import Record # type: ignore from morm.exceptions import ItemDoesNotExistError", "model instance keyword arguments initialize corresponding fields according to the keys. Positional arguments", "k,v in data.items(): # if not self._is_valid_value_(k, v, exclude_values): # continue # if", "= () exclude_fields_down = () exclude_values_up = {'':()} exclude_values_down = {'':()} #internal _field_defs_:", "import morm.meta as mt # for internal use # morm.db must not be", "Field('varchar(255)') ``` An advanced model could look like: ```python import random def get_rand():", "self._is_valid_value_(k, v, self.Meta.exclude_values_down) def _is_valid_down_(self, k: str, v: Any) -> bool: \"\"\"Check whether", "db): \"\"\"Pre-delete hook. Override to run pre delete cleanup. Args: db (DB): db", "because it is void when model is abstract and it gets inherited. meta_attrs['_field_defs_'][n]", "# The following needs to be defined here, not in meta.Meta # meta.Meta", "v = getattr(BaseMeta, k, v) # mutable values can be changed by other", "db (DB): db handle. \"\"\" pass class Model(ModelBase): \"\"\"Base model to be inherited", "passed # through the metaclasses __new__ methods and processed accordingly # to determine", "fields according to the keys. Positional arguments must be dictionaries of keys and", "timestamp class Base(Model): class Meta: pk = 'id' # setting primary key, it", "quote (str): Quote to apply to the column Yields: Iterator[Tuple[str, str]]: Yields column,", "\"\"\" __author__ = '<NAME> <<EMAIL>>' __copyright__ = 'Copyright © <NAME> <https://github.com/neurobin/>' __license__ =", "name '{n}' in model '{class_name}'. 
\\ Field name must not start with underscore.\")", "__author__ = '<NAME> <<EMAIL>>' __copyright__ = 'Copyright © <NAME> <https://github.com/neurobin/>' __license__ = '[BSD](http://www.opensource.org/licenses/bsd-license.php)'", "self.__dict__['func'](k) def __setattr__(self, k, v): raise NotImplementedError class ModelType(type): Meta: typing.ClassVar # fixing", "self._is_valid_value_(k, v, exclude_values): # continue # if gen: # yield k, v #", "-> str: if n in meta_attrs['_field_defs_']: return n else: raise AttributeError(f\"No such field", "{v}') def __repr__(self): reprs = [] for k, v in self: reprs.append(f'{k}={repr(v)}') body", "Yields key, value pair \"\"\" if up: exclude_values = self.Meta.exclude_values_up fields = self.Meta.fields_up", "True proxy = False ordering = () fields_up = () fields_down = ()", "`AttributeError`. ```python f = User.Meta.f my_data = { f.name: '<NAME>', # safe from", "[] # super(ModelBase, self).__setattr__('Meta', Meta) self.__dict__['Meta'] = Meta for k, v in self.__class__.Meta._field_defs_.items():", "arg in args: try: arg_items = arg.items() except AttributeError: raise TypeError(f\"Invalid argument type", "False. Yields: Iterator[Tuple[str, Any]]: Yields key, value pair \"\"\" if up: exclude_values =", "Override to run pre insert cleanup. Args: db (DB): db handle. \"\"\" pass", "async def _post_save_(self, db): \"\"\"Pre-save hook. Override to run post save cleanup. Args:", "NOT NULL') created_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', sql_alter=('ALTER TABLE \"{table}\"", "(DB): db handle. \"\"\" pass async def _post_insert_(self, db): \"\"\"Pre-insert hook. Override to", "() exclude_fields_up = () exclude_fields_down = () exclude_values_up = {'':()} exclude_values_down = {'':()}", "else: raise AttributeError(f\"No such field `{n}` in model `{self.__name__}`\") def _get_fields_(self, up=False) ->", "def __setattr__(self, k: str, v): if k == 'Meta': raise AttributeError(f\"Name '{k} is", "model. Error in model '{class_name}'\") _class_ = super().__new__(mcs, 'x_' + class_name, parents, attrs)", "from morm.exceptions import ItemDoesNotExistError from morm.fields.field import Field, FieldValue from morm.types import Void", "(DB): db handle. \"\"\" pass class Model(ModelBase): \"\"\"Base model to be inherited by", "attribute `f`: You can access field names from `ModelClass.Meta.f`. This allows a spell-safe", "inherits everything try: meta_attrs['db_table'] = BaseMeta.db_table meta_attrs['abstract'] = BaseMeta.abstract except AttributeError: raise TypeError(f\"This", "False) _set_meta_attr('pk', 'id') _set_meta_attr('ordering', ()) _set_meta_attr('fields_up', ()) _set_meta_attr('fields_down', ()) _set_meta_attr('exclude_fields_up', ()) _set_meta_attr('exclude_fields_down', ())", "ModelType(type): Meta: typing.ClassVar # fixing mypy error: \"ModelType\" has no attribute \"Meta\" def", "#proxy model inherits everything try: meta_attrs['db_table'] = BaseMeta.db_table meta_attrs['abstract'] = BaseMeta.abstract except AttributeError:", "{'':()} #internal _field_defs_: Dict[str, Field] _fields_: Dict[str, FieldValue] _fromdb_: List[str] def __init__(self, *args,", "insert cleanup. Args: db (DB): db handle. \"\"\" pass async def _post_update_(self, db):", "db): \"\"\"Pre-insert hook. Override to run pre insert cleanup. 
Args: db (DB): db", "db table name for model \"\"\" return self.Meta.db_table def _is_abstract_(self) -> bool: \"\"\"Whether", "'id' '''Primary key''' db_table = Void abstract = True proxy = False ordering", "attempt to access field `{k}`. It is excluded using either exclude_fields_down or exclude_values_down", "attrs: new_attrs[n] = attrs[n] # we do this after finalizing meta_attr def _get_field_name(n:", "cleanup. Args: db (DB): db handle. \"\"\" pass async def _post_save_(self, db): \"\"\"Pre-save", "getattr(_class_, 'Meta', _Meta_) meta_attrs = {} def _set_meta_attr(k, v, mutable=False, inherit=True, internal=False): try:", "name must not start with underscore.\") if meta_attrs['proxy'] and n in attrs: raise", "k in self.Meta._fromdb_: self.Meta._fromdb_.remove(k) else: raise AttributeError(f'Can not set field `{k}`. It is", "in self.Meta._field_defs_: return n else: raise AttributeError(f\"No such field `{n}` in model `{self.__name__}`\")", "\"\"\"Yield each ordering from model parsed and converted to column, direction direction is", "__new__ methods and processed accordingly # to determine which one should be inherited", "underscore.\") if meta_attrs['proxy'] and n in attrs: raise ValueError(f\"Proxy model '{class_name}' can not", "False if k in exclude_values: if v in exclude_values[k]: return False if ''", "reserved for predefined methods.') self.__dict__[k] = v return fields = self.Meta._fields_ if k", "# new_data = type(data)() # for k,v in data.items(): # if not self._is_valid_value_(k,", "# safe from spelling mistake 'hobby': 'Gardenning', # unsafe from spelling mistake }", "'x_' + class_name, parents, attrs) BaseMeta = getattr(_class_, 'Meta', _Meta_) meta_attrs = {}", "if k in exclude_values: if v in exclude_values[k]: return False if '' in", "which one should not. pk = 'id' '''Primary key''' db_table = Void abstract", "# if up: # exclude_values = self.Meta.exclude_values_up # else: # exclude_values = self.Meta.exclude_values_down", "``` Then a minimal model could look like this: ```python class User(Base): name", "v, exclude_values): # continue # if gen: # yield k, v # else:", "in self.Meta._fromdb_: fields[k]._ignore_first_change_count_ = True self.Meta._fromdb_.remove(k) fields[k].value = v elif k in self.Meta._fromdb_:", "field value Yields: tuple: field_name, field_value \"\"\" for k, f in self.Meta._fields_.items(): if", "k: str, v: Any) -> bool: \"\"\"Returns True if the value for the", "Initialize a model instance keyword arguments initialize corresponding fields according to the keys.", "class Meta: # The following needs to be defined here, not in meta.Meta", "AttributeError: raise TypeError(f\"Invalid argument type ({type(arg)}) to Model __init__ method. Expected: dictionary or", "any restriction. Returns: Dict[str, Field]: Dictionary of all fields \"\"\" return self.Meta._field_defs_ def", "exclude_fields): continue if not self._is_valid_value_(k, v.value, exclude_values): continue yield k, v # def", "be a proxy model. It does not have a valid base or super", "_set_meta_attr('abstract', False, inherit=False) if meta_attrs['abstract']: meta_attrs['db_table'] = Void else: _set_meta_attr('db_table', class_name, inherit=False) new_attrs", "def get_rand(): return random.randint(1, 9) class User(Base): class Meta: db_table = 'myapp_user' abstract", "in attrs: raise ValueError(f\"Proxy model '{class_name}' can not define new field: {n}\") v.name", "v is field value Yields: tuple: field_name, field_value \"\"\" for k, f in", "up (data update) or down (data retrieval). 
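# A minimal sketch of how _FieldNames resolves names (the `lookup` function
# below is hypothetical, for illustration only): attribute access simply
# delegates to the lookup callable the instance was constructed with, so a
# bad name surfaces as whatever error that callable raises.
#
#     def lookup(n: str) -> str:
#         if n in ('name', 'email'):
#             return n
#         raise AttributeError(f"No such field '{n}'")
#
#     f = _FieldNames(lookup)
#     f.name     # -> 'name'
#     f.hobby    # -> AttributeError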


class ModelType(type):
    Meta: typing.ClassVar  # fixing mypy error: "ModelType" has no attribute "Meta"

    def __new__(mcs, class_name: str, bases: tuple, attrs: dict):
        # Ensure initialization is only performed for subclasses of Model
        # excluding Model class itself.
        parents = tuple(b for b in bases if isinstance(b, ModelType))
        if not parents:
            return super().__new__(mcs, class_name, bases, attrs)

        classcell = attrs.pop('__classcell__', None)
        class _Meta_(mt.Meta): pass
        meta = attrs.pop('Meta', _Meta_)
        if not inspect.isclass(meta):  # Meta must be given as a class
            raise TypeError(f"Name 'Meta' is reserved for a class to pass configuration or metadata of a model. Error in model '{class_name}'")
        _class_ = super().__new__(mcs, 'x_' + class_name, parents, attrs)
        BaseMeta = getattr(_class_, 'Meta', _Meta_)
        meta_attrs = {}

        def _set_meta_attr(k, v, mutable=False, inherit=True, internal=False):
            try:
                given_value = getattr(meta, k)
                if internal:
                    raise ValueError(f"'{k}' is a reserved attribute for class Meta. Error in model '{class_name}'")
                given_type = type(given_value)
                required_type = type(v)
                if not given_type is required_type:
                    raise TypeError(f"Invalid type {given_type} given for attribute '{k}' in class '{class_name}.Meta'. Required {required_type}.")
                meta_attrs[k] = given_value
            except AttributeError:
                if inherit:
                    v = getattr(BaseMeta, k, v)
                # mutable values can be changed by other class meta change
                if mutable:
                    meta_attrs[k] = copy.deepcopy(v)
                else:
                    meta_attrs[k] = v

        _set_meta_attr('proxy', False)
        _set_meta_attr('pk', 'id')
        _set_meta_attr('ordering', ())
        _set_meta_attr('fields_up', ())
        _set_meta_attr('fields_down', ())
        _set_meta_attr('exclude_fields_up', ())
        _set_meta_attr('exclude_fields_down', ())
        _set_meta_attr('exclude_values_up', {'': ()}, mutable=True)
        _set_meta_attr('exclude_values_down', {'': ()}, mutable=True)
        _set_meta_attr('_field_defs_', {}, internal=True, mutable=True)

        if meta_attrs['proxy']:
            # proxy model inherits everything
            try:
                meta_attrs['db_table'] = BaseMeta.db_table
                meta_attrs['abstract'] = BaseMeta.abstract
            except AttributeError:
                raise TypeError(f"This model '{class_name}' can not be a proxy model. It does not have a valid base or super base non-proxy model")
        else:
            _set_meta_attr('abstract', False, inherit=False)
            if meta_attrs['abstract']:
                meta_attrs['db_table'] = Void
            else:
                _set_meta_attr('db_table', class_name, inherit=False)

        new_attrs = {}

        # dict is ordered, officially from python 3.7
        for n, v in _class_.__dict__.items():
            if isinstance(v, Field):
                if n.startswith('_'):
                    raise AttributeError(f"Invalid field name '{n}' in model '{class_name}'. \
                        Field name must not start with underscore.")
                if meta_attrs['proxy'] and n in attrs:
                    raise ValueError(f"Proxy model '{class_name}' can not define new field: {n}")
                v.name = n
                # v.sql_conf.conf['table_name'] = meta_attrs['db_table'] # Field must not contain table name, because it is void when model is abstract and it gets inherited.
                meta_attrs['_field_defs_'][n] = v
            elif n in attrs:
                new_attrs[n] = attrs[n]

        # we do this after finalizing meta_attrs
        def _get_field_name(n: str) -> str:
            if n in meta_attrs['_field_defs_']:
                return n
            else:
                raise AttributeError(f"No such field '{n}' in model '{class_name}'")
        meta_attrs['f'] = _FieldNames(_get_field_name)

        MetaClass = mt.MetaType('Meta', (mt.Meta,), meta_attrs)
        new_attrs['Meta'] = MetaClass

        if classcell is not None:
            new_attrs['__classcell__'] = classcell
        return super().__new__(mcs, class_name, bases, new_attrs)

    def __setattr__(self, k, v):
        raise NotImplementedError("You can not set model attributes outside model definition.")

    def __delattr__(self, k):
        raise NotImplementedError("You can not delete model attributes outside model definition.")
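    # Illustration of the metaclass guards (the model names below are
    # hypothetical, not part of this module): a non-class Meta is rejected at
    # class creation, and finished model classes are read-only.
    #
    #     class Bad(Model):
    #         Meta = 'not a class'    # -> TypeError from ModelType.__new__
    #
    #     class Good(Model):
    #         class Meta:
    #             abstract = True
    #
    #     Good.anything = 1           # -> NotImplementedError via __setattr__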

    def _is_valid_key_(self, k: str, fields: Tuple[str], exclude_keys: Tuple[str]) -> bool:
        """Returns True if the key is valid considering include/exclude keys
        """
        if k in exclude_keys:
            return False
        if fields and k not in fields:
            return False
        return True

    def _is_valid_down_key_(self, k: str) -> bool:
        """Returns True if the key is valid considering include/exclude down keys
        """
        return self._is_valid_key_(k, self.Meta.fields_down, self.Meta.exclude_fields_down)

    def _is_valid_up_key_(self, k: str) -> bool:
        """Returns True if the key is valid considering include/exclude up keys
        """
        return self._is_valid_key_(k, self.Meta.fields_up, self.Meta.exclude_fields_up)

    def _is_valid_value_(self, k: str, v: Any, exclude_values: Dict[str, Tuple[Any]]) -> bool:
        """Returns True if the value for the key is valid considering exclude values
        """
        if v is Void:
            return False
        if k in exclude_values:
            if v in exclude_values[k]:
                return False
        if '' in exclude_values and v in exclude_values['']:
            return False
        return True

    def _is_valid_up_value_(self, k: str, v: Any) -> bool:
        """Returns True if the value for the key is valid considering exclude up values
        """
        return self._is_valid_value_(k, v, self.Meta.exclude_values_up)

    def _is_valid_down_value_(self, k: str, v: Any) -> bool:
        """Returns True if the value for the key is valid considering exclude down values
        """
        return self._is_valid_value_(k, v, self.Meta.exclude_values_down)

    def _is_valid_down_(self, k: str, v: Any) -> bool:
        """Check whether the key and value is valid for down (data retrieval)
        """
        return self._is_valid_down_key_(k) and self._is_valid_down_value_(k, v)

    def _is_valid_up_(self, k: str, v: Any) -> bool:
        """Check whether the key and value is valid for up (data update)
        """
        return self._is_valid_up_key_(k) and self._is_valid_up_value_(k, v)
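    # Semantics sketch (the tuples below are hypothetical Meta values): a key
    # passes the filter only if it is listed in the include tuple (when that
    # tuple is non-empty) and not listed in the exclude tuple:
    #
    #     _is_valid_key_('name',  ('name', 'email'), ('email',))  # True
    #     _is_valid_key_('email', ('name', 'email'), ('email',))  # False (excluded)
    #     _is_valid_key_('age',   ('name', 'email'), ('email',))  # False (not included)
    #
    # The '' key in exclude_values_up/down holds values excluded for *every*
    # field, which is why {'': ()} is the neutral default.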

    def _get_all_fields_(self) -> Dict[str, Field]:
        """Get all fields on model without applying any restriction.

        Returns:
            Dict[str, Field]: Dictionary of all fields
        """
        return self.Meta._field_defs_

    def _check_field_name_(self, n: str) -> str:
        """Return the field name if exists else raise AttributeError

        Args:
            n (str): field name

        Raises:
            AttributeError: if field name does not exist

        Returns:
            str: field name
        """
        if n in self.Meta._field_defs_:
            return n
        else:
            raise AttributeError(f"No such field `{n}` in model `{self.__name__}`")

    def _get_fields_(self, up=False) -> Iterator[str]:
        """Yields field names that pass include/exclude criteria

        Args:
            up (bool, optional): up criteria or down criteria. Defaults to False (down).

        Yields:
            str: field name
        """
        if up:
            fields = self.Meta.fields_up
            exclude_keys = self.Meta.exclude_fields_up
        else:
            fields = self.Meta.fields_down
            exclude_keys = self.Meta.exclude_fields_down
        all_fields = self._get_all_fields_()
        for k in all_fields:
            if not self._is_valid_key_(k, fields, exclude_keys):
                continue
            yield k

    def _get_FieldValue_data_valid_(self, data: dict, up=False) -> Iterator[Tuple[str, Any]]:
        """Yields valid key,value pairs from data.

        Validity is checked against include/exclude key/value criteria.

        Args:
            data (dict): data to be validated.
            up (bool, optional): whether up (data update) or down (data retrieval). Defaults to False.

        Yields:
            Iterator[Tuple[str, Any]]: Yields key, value pair
        """
        if up:
            exclude_values = self.Meta.exclude_values_up
            fields = self.Meta.fields_up
            exclude_fields = self.Meta.exclude_fields_up
        else:
            exclude_values = self.Meta.exclude_values_down
            fields = self.Meta.fields_down
            exclude_fields = self.Meta.exclude_fields_down
        # new_data = type(data)()
        for k, v in data.items():
            if not self._is_valid_key_(k, fields, exclude_fields):
                continue
            if not self._is_valid_value_(k, v.value, exclude_values):
                continue
            yield k, v

    # def _get_data_for_valid_values_(self, data, up=False, gen=False):
    #     if up:
    #         exclude_values = self.Meta.exclude_values_up
    #     else:
    #         exclude_values = self.Meta.exclude_values_down
    #     new_data = type(data)()
    #     for k, v in data.items():
    #         if not self._is_valid_value_(k, v, exclude_values):
    #             continue
    #         if gen:
    #             yield k, v
    #         else:
    #             new_data[k] = v
    #     if not gen:
    #         return new_data

    def _get_db_table_(self) -> str:
        """Get db table name for model
        """
        return self.Meta.db_table

    def _is_abstract_(self) -> bool:
        """Whether it's an abstract model or not
        """
        return self.Meta.abstract

    def _is_proxy_(self) -> bool:
        """Whether it is a proxy model or not
        """
        return self.Meta.proxy

    def _get_pk_(self) -> str:
        """Get primary column name
        """
        return self.Meta.pk

    def _get_ordering_(self, quote: str) -> Iterator[Tuple[str, str]]:
        """Yield each ordering from model parsed and converted to column, direction

        direction is either `ASC` or `DESC`

        Args:
            quote (str): Quote to apply to the column

        Yields:
            Iterator[Tuple[str, str]]: Yields column, direction
        """
        ordering = self.Meta.ordering
        for o in ordering:
            direction = 'ASC'  # reset per entry so a '-' prefix does not leak into later entries
            if o.startswith('-'):
                direction = 'DESC'
                o = o[1:]
            elif o.startswith('+'):
                o = o[1:]
            o = f"{quote}{o}{quote}"
            yield o, direction
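    # Example of the ordering conversion (the ordering value is hypothetical):
    # with Meta.ordering = ('name', '-created_at') and quote = '"',
    # _get_ordering_ yields
    #
    #     ('"name"', 'ASC')
    #     ('"created_at"', 'DESC')
    #
    # i.e. a leading '-' means DESC, a leading '+' (or no prefix) means ASC.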


class ModelBase(metaclass=ModelType):
    """Base Model for all models.

    Do not inherit from this class, use Model instead.

    Raises:
        TypeError: When invalid type is encountered
        AttributeError: When misspelled fields are tried to set.
    """
    class Meta:
        """Meta that holds metadata for model
        """
        # The following needs to be defined here, not in meta.Meta.
        # meta.Meta is used in client Models, thus everything
        # included there will be blindly inherited, while these are passed
        # through the metaclasses __new__ methods and processed accordingly
        # to determine which one should be inherited and which one should not.
        pk = 'id'
        '''Primary key'''
        db_table = Void
        abstract = True
        proxy = False
        ordering = ()
        fields_up = ()
        fields_down = ()
        exclude_fields_up = ()
        exclude_fields_down = ()
        exclude_values_up = {'': ()}
        exclude_values_down = {'': ()}

        # internal
        _field_defs_: Dict[str, Field]
        _fields_: Dict[str, FieldValue]
        _fromdb_: List[str]

    def __init__(self, *args, **kwargs):
        class Meta:
            _fields_: Dict[str, FieldValue] = {}
            _fromdb_: List[str] = []
        # super(ModelBase, self).__setattr__('Meta', Meta)
        self.__dict__['Meta'] = Meta
        for k, v in self.__class__.Meta._field_defs_.items():
            self.Meta._fields_[k] = FieldValue(v)
        for arg in args:
            try:
                arg_items = arg.items()
            except AttributeError:
                raise TypeError(f"Invalid argument type ({type(arg)}) to Model __init__ method. Expected: dictionary or keyword argument")
            for k, v in arg_items:
                setattr(self, k, v)
        for k, v in kwargs.items():
            setattr(self, k, v)
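    # Construction sketch (User is a hypothetical concrete model): positional
    # dict arguments and keyword arguments both route through setattr, so the
    # same include/exclude validation applies to every initialization style.
    #
    #     u = User({'name': 'Alice'}, profession='Teacher')
    #     # equivalent to:
    #     u = User()
    #     u.name = 'Alice'
    #     u.profession = 'Teacher'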

    def __iter__(self):
        """Iterate through k, v where k is field name and v is field value

        Yields:
            tuple: field_name, field_value
        """
        for k, f in self.Meta._fields_.items():
            if self.__class__._is_valid_down_(k, f.value):
                yield k, f.value

    def __delattr__(self, k):
        fields = self.Meta._fields_
        if k in fields:
            fields[k].delete_value()
        else:
            super().__delattr__(k)

    def __getattr__(self, k):
        Meta = self.__dict__['Meta']
        fields = Meta._fields_
        if k in fields:
            v = fields[k].value
            if self.__class__._is_valid_down_(k, v):
                return v
            raise AttributeError(f'Invalid attempt to access field `{k}`. It is excluded using either exclude_fields_down or exclude_values_down in {self.__class__.__name__} Meta class. Or it does not have any valid value.')
        raise AttributeError(k)

    def __setattr__(self, k: str, v):
        if k == 'Meta':
            raise AttributeError(f"Name '{k}' is reserved. You should not try to change it.")
        if k.startswith('_'):
            if k.endswith('_'):
                raise AttributeError('_<name>_ such names are reserved for predefined methods.')
            self.__dict__[k] = v
            return
        fields = self.Meta._fields_
        if k not in fields:
            raise AttributeError(f"No such field ('{k}') in model '{self.__class__.__name__}'")
        # v = fields[k].clean(v)
        # super().__setattr__(k, v)
        if self.__class__._is_valid_up_(k, v):
            if k in self.Meta._fromdb_:
                fields[k]._ignore_first_change_count_ = True
                self.Meta._fromdb_.remove(k)
            fields[k].value = v
        elif k in self.Meta._fromdb_:
            self.Meta._fromdb_.remove(k)
        else:
            raise AttributeError(f'Can not set field `{k}`. It is excluded using either exclude_fields_up or exclude_values_up in {self.__class__.__name__} Meta class. Or you are trying to set an invalid value: {v}')

    def __repr__(self):
        reprs = []
        for k, v in self:
            reprs.append(f'{k}={repr(v)}')
        body = ', '.join(reprs)
        return f'{self.__class__.__name__}({body})'
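    # Attribute-access sketch (User and its fields are hypothetical): reads go
    # through the down filter, writes through the up filter, and repr only
    # shows fields that pass the down filter.
    #
    #     u = User(name='Alice')
    #     u.name            # 'Alice'
    #     u._cache = 1      # allowed: single leading underscore, kept on __dict__
    #     u._secret_ = 1    # AttributeError: _<name>_ names are reserved
    #     repr(u)           # "User(name='Alice')"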

    async def _pre_save_(self, db):
        """Pre-save hook. Override to run pre save cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_save_(self, db):
        """Post-save hook. Override to run post save cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _pre_insert_(self, db):
        """Pre-insert hook. Override to run pre insert cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_insert_(self, db):
        """Post-insert hook. Override to run post insert cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _pre_update_(self, db):
        """Pre-update hook. Override to run pre update cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_update_(self, db):
        """Post-update hook. Override to run post update cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _pre_delete_(self, db):
        """Pre-delete hook. Override to run pre delete cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_delete_(self, db):
        """Post-delete hook. Override to run post delete cleanup.

        Args:
            db (DB): db handle.
        """
        pass
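    # Hook-override sketch (User/Base are hypothetical models): the hooks are
    # coroutines, so overrides must be async; `db` is whatever handle the
    # caller (e.g. morm.db) passes in.
    #
    #     class User(Base):
    #         async def _pre_save_(self, db):
    #             try:
    #                 self.email = self.email.lower()  # hypothetical normalization
    #             except AttributeError:
    #                 pass  # email not set yet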
Yields: str: field name \"\"\" if up: fields", "self.Meta.exclude_fields_up else: exclude_values = self.Meta.exclude_values_down fields = self.Meta.fields_down exclude_fields = self.Meta.exclude_fields_down # new_data", "down (data retrieval) \"\"\" return self._is_valid_down_key_(k) and self._is_valid_down_value_(k, v) def _is_valid_up_(self, k: str,", "# return new_data def _get_db_table_(self) -> str: \"\"\"Get db table name for model", "db (DB): db handle. \"\"\" pass async def _post_delete_(self, db): \"\"\"Pre-delete hook. Override", "'[BSD](http://www.opensource.org/licenses/bsd-license.php)' __version__ = '0.0.1' import inspect import typing from typing import Optional, Dict,", "class. Or you are trying to set an invalid value: {v}') def __repr__(self):", "... etc... # see morm.meta.Meta for supported meta attributes. name = Field('varchar(65)') email", "not. pk = 'id' '''Primary key''' db_table = Void abstract = True proxy", "random.randint(1, 9) class User(Base): class Meta: db_table = 'myapp_user' abstract = False #", "Model # excluding Model class itself. parents = tuple(b for b in bases", "value for the key is valid considering exclude values \"\"\" if v is", "new_attrs[n] = attrs[n] # we do this after finalizing meta_attr def _get_field_name(n: str)", "fields are tried to set. \"\"\" class Meta: \"\"\"Meta that holds metadata for", "try to change it.\") if k.startswith('_'): if k.endswith('_'): raise AttributeError('_<name>_ such names are", "-> bool: \"\"\"Whether it is a proxy model or not \"\"\" return self.Meta.proxy", "async def _pre_insert_(self, db): \"\"\"Pre-insert hook. Override to run pre insert cleanup. Args:", "_field_defs_: Dict[str, Field] _fields_: Dict[str, FieldValue] _fromdb_: List[str] def __init__(self, *args, **kwargs): class", "# function can be default ``` ## Initialize a model instance keyword arguments", "fields = self.Meta.fields_up exclude_fields = self.Meta.exclude_fields_up else: exclude_values = self.Meta.exclude_values_down fields = self.Meta.fields_down", "direction class ModelBase(metaclass=ModelType): \"\"\"Base Model for all models. Do not inherit from this", "'<NAME>', 'profession': 'Teacher'}, age=34) User({'name': '<NAME>', 'profession': 'Teacher', 'active': True}, age=34) ``` Raises:", "Tuple, TypeVar, Union, Any, Iterator from collections import OrderedDict import copy from abc", "for model \"\"\" # The following needs to be defined here, not in", "valid considering exclude values \"\"\" if v is Void: return False if k", "= getattr(meta, k) if internal: raise ValueError(f\"'{k}' is a reserved attribute for class", "of Model # excluding Model class itself. parents = tuple(b for b in", "in _class_.__dict__.items(): if isinstance(v, Field): if n.startswith('_'): raise AttributeError(f\"Invalid field name '{n}' in", "db (DB): db handle. \"\"\" pass async def _post_update_(self, db): \"\"\"Pre-update hook. Override", "exclude_fields_down = () exclude_values_up = {'':()} exclude_values_down = {'':()} #internal _field_defs_: Dict[str, Field]", "Override to run pre update cleanup. Args: db (DB): db handle. \"\"\" pass", "Base model first: ```python from morm.model import Model from morm.datetime import timestamp class", "exclude_values[k]: return False if '' in exclude_values and v in exclude_values['']: return False", "new_data = type(data)() for k,v in data.items(): if not self._is_valid_key_(k, fields, exclude_fields): continue", "is False proxy = False # default is False # ... etc... #", "proxy = False # default is False # ... etc... 
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright © <NAME> <https://github.com/neurobin/>'
__license__ = '[BSD](http://www.opensource.org/licenses/bsd-license.php)'
__version__ = '0.0.1'

import inspect
import typing
from typing import Optional, Dict, List, Tuple, TypeVar, Union, Any, Iterator
from collections import OrderedDict
import copy
from abc import ABCMeta
from asyncpg import Record  # type: ignore
from morm.exceptions import ItemDoesNotExistError
from morm.fields.field import Field, FieldValue
from morm.types import Void
import morm.meta as mt  # for internal use

# morm.db must not be imported here.

Meta = mt.Meta  # For client use


class _FieldNames():
    """Access field names
    """
    def __init__(self, func):
        self.__dict__['func'] = func

    def __getattr__(self, k):
        return self.__dict__['func'](k)

    def __setattr__(self, k, v):
        raise NotImplementedError
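# Illustrative sketch (not part of the library API): `_FieldNames` routes
# attribute access through the callable it wraps, so `obj.attr` evaluates to
# `func('attr')`. With a hypothetical checker function:
#
#     def _check(n):
#         if n in ('name', 'email'):
#             return n
#         raise AttributeError(n)
#
#     f = _FieldNames(_check)
#     f.name       # -> 'name'
#     f.passwrod   # -> AttributeError (typos can not pass silently)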

class ModelType(type):
    Meta: typing.ClassVar  # fixing mypy error: "ModelType" has no attribute "Meta"

    def __new__(mcs, class_name: str, bases: tuple, attrs: dict):
        # Ensure initialization is only performed for subclasses of Model,
        # excluding the Model class itself.
        parents = tuple(b for b in bases if isinstance(b, ModelType))
        if not parents:
            return super().__new__(mcs, class_name, bases, attrs)

        classcell = attrs.pop('__classcell__', None)

        class _Meta_(mt.Meta): pass
        meta = attrs.pop('Meta', _Meta_)
        if not inspect.isclass(meta):  #TEST: Meta is restricted as a class
            raise TypeError(f"Name 'Meta' is reserved for a class to pass configuration or metadata of a model. Error in model '{class_name}'")
        _class_ = super().__new__(mcs, 'x_' + class_name, parents, attrs)
        BaseMeta = getattr(_class_, 'Meta', _Meta_)
        meta_attrs = {}

        def _set_meta_attr(k, v, mutable=False, inherit=True, internal=False):
            try:
                given_value = getattr(meta, k)
                if internal:
                    raise ValueError(f"'{k}' is a reserved attribute for class Meta. Error in model '{class_name}'")
                given_type = type(given_value)
                required_type = type(v)
                if given_type is not required_type:
                    raise TypeError(f"Invalid type {given_type} given for attribute '{k}' in class '{class_name}.Meta'. Required {required_type}.")
                meta_attrs[k] = given_value
            except AttributeError:
                if inherit:
                    v = getattr(BaseMeta, k, v)
                # mutable values are deep-copied so that one model's Meta
                # can not change another's through a shared default.
                if mutable:
                    meta_attrs[k] = copy.deepcopy(v)
                else:
                    meta_attrs[k] = v

        _set_meta_attr('proxy', False)
        _set_meta_attr('pk', 'id')
        _set_meta_attr('ordering', ())
        _set_meta_attr('fields_up', ())
        _set_meta_attr('fields_down', ())
        _set_meta_attr('exclude_fields_up', ())
        _set_meta_attr('exclude_fields_down', ())
        _set_meta_attr('exclude_values_up', {'': ()}, mutable=True)
        _set_meta_attr('exclude_values_down', {'': ()}, mutable=True)
        _set_meta_attr('_field_defs_', {}, internal=True, mutable=True)

        if meta_attrs['proxy']:
            # proxy model inherits everything
            try:
                meta_attrs['db_table'] = BaseMeta.db_table
                meta_attrs['abstract'] = BaseMeta.abstract
            except AttributeError:
                raise TypeError(f"This model '{class_name}' can not be a proxy model. It does not have a valid base or super base non-proxy model")
        else:
            _set_meta_attr('abstract', False, inherit=False)
            if meta_attrs['abstract']:
                meta_attrs['db_table'] = Void
            else:
                _set_meta_attr('db_table', class_name, inherit=False)

        new_attrs = {}
        # dict is ordered, officially from python 3.7
        for n, v in _class_.__dict__.items():
            if isinstance(v, Field):
                if n.startswith('_'):
                    raise AttributeError(f"Invalid field name '{n}' in model '{class_name}'. Field name must not start with underscore.")
                if meta_attrs['proxy'] and n in attrs:
                    raise ValueError(f"Proxy model '{class_name}' can not define new field: {n}")
                v.name = n
                # v.sql_conf.conf['table_name'] = meta_attrs['db_table']
                # Field must not contain table_name, because it is void when
                # model is abstract and it gets inherited.
                meta_attrs['_field_defs_'][n] = v
            elif n in attrs:
                new_attrs[n] = attrs[n]

        # we do this after finalizing meta_attrs
        def _get_field_name(n: str) -> str:
            if n in meta_attrs['_field_defs_']:
                return n
            else:
                raise AttributeError(f"No such field '{n}' in model '{class_name}'")
        meta_attrs['f'] = _FieldNames(_get_field_name)

        MetaClass = mt.MetaType('Meta', (mt.Meta,), meta_attrs)
        new_attrs['Meta'] = MetaClass

        if classcell is not None:
            new_attrs['__classcell__'] = classcell
        return super().__new__(mcs, class_name, bases, new_attrs)

    def __setattr__(self, k, v):
        raise NotImplementedError("You can not set model attributes outside model definition.")

    def __delattr__(self, k):
        raise NotImplementedError("You can not delete model attributes outside model definition.")
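    # Illustrative sketch (not part of the library): by the time __new__
    # returns, a concrete model's metadata lives on its generated Meta class.
    # Assuming a hypothetical model `Book(Model)` with a `title` field:
    #
    #     Book.Meta.db_table    # 'Book' (defaulted from the class name)
    #     Book.Meta.pk          # 'id'
    #     Book.Meta.f.title     # 'title' (spell-safe field access)
    #     Book.title = None     # NotImplementedError (model class is frozen)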
    def _is_valid_key_(self, k: str, fields: Tuple[str], exclude_keys: Tuple[str]) -> bool:
        """Returns True if the key is valid considering include/exclude keys
        """
        if k in exclude_keys:
            return False
        if fields and k not in fields:
            return False
        return True

    def _is_valid_down_key_(self, k: str) -> bool:
        """Returns True if the key is valid considering include/exclude down keys
        """
        return self._is_valid_key_(k, self.Meta.fields_down, self.Meta.exclude_fields_down)

    def _is_valid_up_key_(self, k: str) -> bool:
        """Returns True if the key is valid considering include/exclude up keys
        """
        return self._is_valid_key_(k, self.Meta.fields_up, self.Meta.exclude_fields_up)

    def _is_valid_value_(self, k: str, v: Any, exclude_values: Dict[str, Tuple[Any]]) -> bool:
        """Returns True if the value for the key is valid considering exclude values
        """
        if v is Void:
            return False
        if k in exclude_values:
            if v in exclude_values[k]:
                return False
        if '' in exclude_values and v in exclude_values['']:
            return False
        return True

    def _is_valid_up_value_(self, k: str, v: Any) -> bool:
        """Returns True if the value for the key is valid considering exclude up values
        """
        return self._is_valid_value_(k, v, self.Meta.exclude_values_up)

    def _is_valid_down_value_(self, k: str, v: Any) -> bool:
        """Returns True if the value for the key is valid considering exclude down values
        """
        return self._is_valid_value_(k, v, self.Meta.exclude_values_down)

    def _is_valid_down_(self, k: str, v: Any) -> bool:
        """Check whether the key and value are valid for down (data retrieval)
        """
        return self._is_valid_down_key_(k) and self._is_valid_down_value_(k, v)

    def _is_valid_up_(self, k: str, v: Any) -> bool:
        """Check whether the key and value are valid for up (data update)
        """
        return self._is_valid_up_key_(k) and self._is_valid_up_value_(k, v)
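    # Illustrative sketch (not part of the library): how the include/exclude
    # rules above compose for an update. Assuming a Meta like
    #
    #     fields_up = ('name', 'email')      # include list (empty = all)
    #     exclude_fields_up = ('email',)     # exclude wins over include
    #     exclude_values_up = {'': (None,), 'name': ('',)}
    #
    # 'email' fails the key check, 'name' with value '' fails its per-key
    # value check, and any field valued None fails the catch-all '' entry.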
    def _get_all_fields_(self) -> Dict[str, Field]:
        """Get all fields on model without applying any restriction.

        Returns:
            Dict[str, Field]: Dictionary of all fields
        """
        return self.Meta._field_defs_

    def _check_field_name_(self, n: str) -> str:
        """Return the field name if it exists, else raise AttributeError

        Args:
            n (str): field name

        Raises:
            AttributeError: if field name does not exist

        Returns:
            str: field name
        """
        if n in self.Meta._field_defs_:
            return n
        else:
            raise AttributeError(f"No such field `{n}` in model `{self.__name__}`")

    def _get_fields_(self, up=False) -> Iterator[str]:
        """Yields field names that pass include/exclude criteria

        Args:
            up (bool, optional): up criteria or down criteria. Defaults to False (down).

        Yields:
            str: field name
        """
        if up:
            fields = self.Meta.fields_up
            exclude_keys = self.Meta.exclude_fields_up
        else:
            fields = self.Meta.fields_down
            exclude_keys = self.Meta.exclude_fields_down
        all_fields = self._get_all_fields_()
        for k in all_fields:
            if not self._is_valid_key_(k, fields, exclude_keys):
                continue
            yield k

    def _get_FieldValue_data_valid_(self, data: dict, up=False) -> Iterator[Tuple[str, Any]]:
        """Yields valid key, value pairs from data.

        Validity is checked against include/exclude key/value criteria.

        Args:
            data (dict): data to be validated.
            up (bool, optional): whether up (data update) or down (data retrieval). Defaults to False.

        Yields:
            Iterator[Tuple[str, Any]]: Yields key, value pair
        """
        if up:
            exclude_values = self.Meta.exclude_values_up
            fields = self.Meta.fields_up
            exclude_fields = self.Meta.exclude_fields_up
        else:
            exclude_values = self.Meta.exclude_values_down
            fields = self.Meta.fields_down
            exclude_fields = self.Meta.exclude_fields_down
        # new_data = type(data)()
        for k, v in data.items():
            if not self._is_valid_key_(k, fields, exclude_fields):
                continue
            if not self._is_valid_value_(k, v.value, exclude_values):
                continue
            yield k, v

    # def _get_data_for_valid_values_(self, data, up=False, gen=False):
    #     if up:
    #         exclude_values = self.Meta.exclude_values_up
    #     else:
    #         exclude_values = self.Meta.exclude_values_down
    #     new_data = type(data)()
    #     for k, v in data.items():
    #         if not self._is_valid_value_(k, v, exclude_values):
    #             continue
    #         if gen:
    #             yield k, v
    #         else:
    #             new_data[k] = v
    #     if not gen:
    #         return new_data

    def _get_db_table_(self) -> str:
        """Get db table name for model
        """
        return self.Meta.db_table

    def _is_abstract_(self) -> bool:
        """Whether it's an abstract model or not
        """
        return self.Meta.abstract

    def _is_proxy_(self) -> bool:
        """Whether it is a proxy model or not
        """
        return self.Meta.proxy

    def _get_pk_(self) -> str:
        """Get primary column name
        """
        return self.Meta.pk

    def _get_ordering_(self, quote: str) -> Iterator[Tuple[str, str]]:
        """Yield each ordering from model parsed and converted to column, direction.

        direction is either `ASC` or `DESC`

        Args:
            quote (str): Quote to apply to the column

        Yields:
            Iterator[Tuple[str, str]]: Yields column, direction
        """
        ordering = self.Meta.ordering
        for o in ordering:
            direction = 'ASC'  # reset per entry; a '-' prefix means DESC
            if o.startswith('-'):
                direction = 'DESC'
                o = o[1:]
            elif o.startswith('+'):
                o = o[1:]
            o = f"{quote}{o}{quote}"
            yield o, direction
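# Illustrative sketch (not part of the library): the read-only metaclass
# helpers above can be exercised directly on a model class. Assuming the
# hypothetical `Book` model with `ordering = ('-created_at', 'name')`:
#
#     list(Book._get_fields_())        # field names passing down criteria
#     list(Book._get_ordering_('"'))   # [('"created_at"', 'DESC'),
#                                      #  ('"name"', 'ASC')]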

class ModelBase(metaclass=ModelType):
    """Base Model for all models.

    Do not inherit from this class, use Model instead.

    Raises:
        TypeError: When an invalid type is encountered
        AttributeError: When a misspelled field name is set.
    """
    class Meta:
        """Meta that holds metadata for model
        """
        # The following needs to be defined here, not in meta.Meta.
        # meta.Meta is used in client Models, thus everything
        # included there will be blindly inherited, while these are passed
        # through the metaclass's __new__ method and processed accordingly
        # to determine which one should be inherited and which one should not.
        pk = 'id'
        '''Primary key'''
        db_table = Void
        abstract = True
        proxy = False
        ordering = ()
        fields_up = ()
        fields_down = ()
        exclude_fields_up = ()
        exclude_fields_down = ()
        exclude_values_up = {'': ()}
        exclude_values_down = {'': ()}

        # internal
        _field_defs_: Dict[str, Field]
        _fields_: Dict[str, FieldValue]
        _fromdb_: List[str]

    def __init__(self, *args, **kwargs):
        class Meta:
            _fields_: Dict[str, FieldValue] = {}
            _fromdb_: List[str] = []
        # super(ModelBase, self).__setattr__('Meta', Meta)
        self.__dict__['Meta'] = Meta
        for k, v in self.__class__.Meta._field_defs_.items():
            self.Meta._fields_[k] = FieldValue(v)
        for arg in args:
            try:
                arg_items = arg.items()
            except AttributeError:
                raise TypeError(f"Invalid argument type ({type(arg)}) to Model __init__ method. Expected: dictionary or keyword argument")
            for k, v in arg_items:
                setattr(self, k, v)
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __iter__(self):
        """Iterate through k, v where k is field name and v is field value

        Yields:
            tuple: field_name, field_value
        """
        for k, f in self.Meta._fields_.items():
            if self.__class__._is_valid_down_(k, f.value):
                yield k, f.value

    def __delattr__(self, k):
        fields = self.Meta._fields_
        if k in fields:
            fields[k].delete_value()
        else:
            super().__delattr__(k)

    def __getattr__(self, k):
        Meta = self.__dict__['Meta']
        fields = Meta._fields_
        if k in fields:
            v = fields[k].value
            if self.__class__._is_valid_down_(k, v):
                return v
            raise AttributeError(f'Invalid attempt to access field `{k}`. It is excluded using either exclude_fields_down or exclude_values_down in {self.__class__.__name__} Meta class. Or it does not have any valid value.')
        raise AttributeError(k)

    def __setattr__(self, k: str, v):
        if k == 'Meta':
            raise AttributeError(f"Name '{k}' is reserved. You should not try to change it.")
        if k.startswith('_'):
            if k.endswith('_'):
                raise AttributeError('_<name>_ such names are reserved for predefined methods.')
            self.__dict__[k] = v
            return
        fields = self.Meta._fields_
        if k not in fields:
            raise AttributeError(f"No such field ('{k}') in model '{self.__class__.__name__}'")
        # v = fields[k].clean(v)
        # super().__setattr__(k, v)
        if self.__class__._is_valid_up_(k, v):
            if k in self.Meta._fromdb_:
                fields[k]._ignore_first_change_count_ = True
                self.Meta._fromdb_.remove(k)
            fields[k].value = v
        elif k in self.Meta._fromdb_:
            self.Meta._fromdb_.remove(k)
        else:
            raise AttributeError(f'Can not set field `{k}`. It is excluded using either exclude_fields_up or exclude_values_up in {self.__class__.__name__} Meta class. Or you are trying to set an invalid value: {v}')

    def __repr__(self):
        reprs = []
        for k, v in self:
            reprs.append(f'{k}={repr(v)}')
        body = ', '.join(reprs)
        return f'{self.__class__.__name__}({body})'
    async def _pre_save_(self, db):
        """Pre-save hook. Override to run pre save cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_save_(self, db):
        """Post-save hook. Override to run post save cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _pre_delete_(self, db):
        """Pre-delete hook. Override to run pre delete cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_delete_(self, db):
        """Post-delete hook. Override to run post delete cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _pre_insert_(self, db):
        """Pre-insert hook. Override to run pre insert cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_insert_(self, db):
        """Post-insert hook. Override to run post insert cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _pre_update_(self, db):
        """Pre-update hook. Override to run pre update cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_update_(self, db):
        """Post-update hook. Override to run post update cleanup.

        Args:
            db (DB): db handle.
        """
        pass
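# Illustrative sketch (not part of the library): models may override any of
# the hooks above; they are awaited around the corresponding db operation.
# The normalization below is a hypothetical example:
#
#     class User(Model):
#         email = Field('varchar(255)')
#
#         async def _pre_save_(self, db):
#             self.email = self.email.strip().lower()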

class Model(ModelBase):
    """Base model to be inherited by other models.

    It's more than a good practice to define a Base model first:

    ```python
    from morm.model import Model
    from morm.datetime import timestamp

    class Base(Model):
        class Meta:
            pk = 'id'  # setting primary key, it is defaulted to 'id'
            abstract = True

        # postgresql example
        id = Field('SERIAL', sql_onadd='PRIMARY KEY NOT NULL')
        created_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL',
                           sql_alter=('ALTER TABLE "{table}" ALTER COLUMN "{column}" SET DEFAULT NOW()',))
        updated_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', value=timestamp)
    ```

    Then a minimal model could look like this:

    ```python
    class User(Base):
        name = Field('varchar(65)')
        email = Field('varchar(255)')
        password = Field('varchar(255)')
    ```

    An advanced model could look like:

    ```python
    import random

    def get_rand():
        return random.randint(1, 9)

    class User(Base):
        class Meta:
            db_table = 'myapp_user'
            abstract = False    # default is False
            proxy = False       # default is False
            # ... etc...
            # see morm.meta.Meta for supported meta attributes.

        name = Field('varchar(65)')
        email = Field('varchar(255)')
        password = Field('varchar(255)')
        profession = Field('varchar(255)', default='Unknown')
        random = Field('integer', default=get_rand)  # function can be default
    ```

    ## Initialize a model instance

    Keyword arguments initialize corresponding fields according to the keys.

    Positional arguments must be dictionaries of keys and values.

    Example:

    ```python
    User(name='<NAME>', profession='Teacher')
    User({'name': '<NAME>', 'profession': 'Teacher'})
    User({'name': '<NAME>', 'profession': 'Teacher'}, age=34)
    User({'name': '<NAME>', 'profession': 'Teacher', 'active': True}, age=34)
    ```

    Raises:
        TypeError: If an invalid type of argument is provided.

    ## Special Model Meta attribute `f`:

    You can access field names from `ModelClass.Meta.f`.

    This allows a spell-safe way to write the field names. If you
    misspell the name, you will get `AttributeError`.

    ```python
    f = User.Meta.f
    my_data = {
        f.name: '<NAME>',          # safe from spelling mistake
        f.profession: 'Teacher',   # safe from spelling mistake
        'hobby': 'Gardening',      # unsafe from spelling mistake
    }
    ```
    """
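    # Illustrative sketch (not part of the library): instances behave like
    # light records through ModelBase's dunder methods. Assuming the `User`
    # model from the docstring above:
    #
    #     u = User(name='Alice', profession='Teacher')
    #     u.name          # 'Alice'                           (__getattr__)
    #     dict(u)         # fields holding valid values       (__iter__)
    #     repr(u)         # "User(name='Alice', ...)"         (__repr__)
    #     u.hobby = 'x'   # AttributeError: no such field     (__setattr__)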
Defaults", "User.Meta.f my_data = { f.name: '<NAME>', # safe from spelling mistake f.profession: 'Teacher',", "Record # type: ignore from morm.exceptions import ItemDoesNotExistError from morm.fields.field import Field, FieldValue", "def _is_valid_key_(self, k:str, fields:Tuple[str], exclude_keys:Tuple[str]) -> bool: \"\"\"Returns True if the key is", "= mt.Meta # For client use class _FieldNames(): \"\"\"Access field names \"\"\" def", "if not parents: return super().__new__(mcs, class_name, bases, attrs) classcell = attrs.pop('__classcell__', None) class", "else: # new_data[k] = v # if not gen: # return new_data def", "= Field('varchar(65)') email = Field('varchar(255)') password = Field('varchar(255)') profession = Field('varchar(255)', default='Unknown') random", "as mt # for internal use # morm.db must not be imported here.", "in model '{class_name}'\") meta_attrs['f'] = _FieldNames(_get_field_name) MetaClass = mt.MetaType('Meta', (mt.Meta,), meta_attrs) new_attrs['Meta'] =", "name = Field('varchar(65)') email = Field('varchar(255)') password = Field('varchar(255)') ``` An advanced model", "Field]: \"\"\"Get all fields on model without applying any restriction. Returns: Dict[str, Field]:", "can access field names from `ModelClass.Meta.f`. This allows a spell-safe way to write", "Iterator[str]: \"\"\"Yields field names that pass include/exclude criteria Args: up (bool, optional): up", "abc import ABCMeta from asyncpg import Record # type: ignore from morm.exceptions import", "new_attrs = {} # dict is ordered, officially from python 3.7 for n,", "exclude up values \"\"\" return self._is_valid_value_(k, v, self.Meta.exclude_values_up) def _is_valid_down_value_(self, k: str, v:", "or `DESC` Args: quote (str): Quote to apply to the column Yields: Iterator[Tuple[str,", "= Void else: _set_meta_attr('db_table', class_name, inherit=False) new_attrs = {} # dict is ordered,", "= False # default is False proxy = False # default is False", "return self.Meta.abstract def _is_proxy_(self) -> bool: \"\"\"Whether it is a proxy model or", "included there will be blindly inherited, while these are passed # through the", "if n in meta_attrs['_field_defs_']: return n else: raise AttributeError(f\"No such field '{n}' in", "When misspelled fields are tried to set. \"\"\" class Meta: \"\"\"Meta that holds", "return False if fields and k not in fields: return False return True", "str: \"\"\"Return the field name if exists else raise AttributeError Args: n (str):", "TypeVar, Union, Any, Iterator from collections import OrderedDict import copy from abc import", "validated. up (bool, optional): whether up (data update) or down (data retrieval). Defaults", "self.Meta.fields_down exclude_fields = self.Meta.exclude_fields_down # new_data = type(data)() for k,v in data.items(): if", "keys \"\"\" if k in exclude_keys: return False if fields and k not", "\"Meta\" def __new__(mcs, class_name: str, bases: tuple, attrs: dict): # Ensure initialization is", "True if the value for the key is valid considering exclude up values", "{} _fromdb_: List[str] = [] # super(ModelBase, self).__setattr__('Meta', Meta) self.__dict__['Meta'] = Meta for", "one should not. abstract = True def __init__(self, *args, **kwargs): super(Model, self).__init__(*args, **kwargs)", "\"\"\" pass async def _post_insert_(self, db): \"\"\"Pre-insert hook. 
Override to run post insert", "can not define new field: {n}\") v.name = n # v.sql_conf.conf['table_name'] = meta_attrs['db_table']", "a valid base or super base non-proxy model\") else: _set_meta_attr('abstract', False, inherit=False) if", "for the key is valid considering exclude up values \"\"\" return self._is_valid_value_(k, v,", "meta_attrs) new_attrs['Meta'] = MetaClass if classcell is not None: new_attrs['__classcell__'] = classcell return", "ignore from morm.exceptions import ItemDoesNotExistError from morm.fields.field import Field, FieldValue from morm.types import", "False proxy = False # default is False # ... etc... # see", "Dict[str, Field]: \"\"\"Get all fields on model without applying any restriction. Returns: Dict[str,", "func): self.__dict__['func'] = func def __getattr__(self, k): return self.__dict__['func'](k) def __setattr__(self, k, v):", "\\ Field name must not start with underscore.\") if meta_attrs['proxy'] and n in", "created_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', sql_alter=('ALTER TABLE \"{table}\" ALTER COLUMN", "pk = 'id' # setting primary key, it is defaulted to 'id' abstract", "void when model is abstract and it gets inherited. meta_attrs['_field_defs_'][n] = v elif", "is required_type: raise TypeError(f\"Invalid type {given_type} given for attribute '{k}' in class '{class_name}.Meta'.", "supported meta attributes. name = Field('varchar(65)') email = Field('varchar(255)') password = Field('varchar(255)') profession", "is valid considering include/exclude keys \"\"\" if k in exclude_keys: return False if", "'{self.__class__.__name__}''\") # v = fields[k].clean(v) # super().__setattr__(k, v) if self.__class__._is_valid_up_(k, v): if k", "model is abstract and it gets inherited. meta_attrs['_field_defs_'][n] = v elif n in", "self.Meta.pk def _get_ordering_(self, quote: str) -> Iterator[Tuple[str, str]]: \"\"\"Yield each ordering from model", "include/exclude key/value criteria. Args: data (dict): data to be validated. up (bool, optional):", "def __init__(self, func): self.__dict__['func'] = func def __getattr__(self, k): return self.__dict__['func'](k) def __setattr__(self,", "either `ASC` or `DESC` Args: quote (str): Quote to apply to the column", "pre insert cleanup. Args: db (DB): db handle. \"\"\" pass async def _pre_update_(self,", "False # default is False # ... etc... # see morm.meta.Meta for supported", "'{class_name}' can not be a proxy model. It does not have a valid", "in exclude_values['']: return False return True def _is_valid_up_value_(self, k: str, v: Any) ->", "= classcell return super().__new__(mcs, class_name, bases, new_attrs) def __setattr__(self, k, v): raise NotImplementedError(\"You", "pass meta = attrs.pop('Meta', _Meta_) if not inspect.isclass(meta): #TEST: Meta is restricted as", "model definition.\") def _is_valid_key_(self, k:str, fields:Tuple[str], exclude_keys:Tuple[str]) -> bool: \"\"\"Returns True if the", "are tried to set. 
\"\"\" class Meta: \"\"\"Meta that holds metadata for model", "_set_meta_attr('exclude_fields_down', ()) _set_meta_attr('exclude_values_up', {'':()}, mutable=True) _set_meta_attr('exclude_values_down', {'':()}, mutable=True) _set_meta_attr('_field_defs_', {}, internal=True, mutable=True) if", "'profession': 'Teacher'}, age=34) User({'name': '<NAME>', 'profession': 'Teacher', 'active': True}, age=34) ``` Raises: TypeError:", "key is valid considering include/exclude down keys \"\"\" return self._is_valid_key_(k, self.Meta.fields_down, self.Meta.exclude_fields_down) def", "raise NotImplementedError(\"You can not set model attributes outside model definition.\") def __delattr__(self, k):", "typing from typing import Optional, Dict, List, Tuple, TypeVar, Union, Any, Iterator from", "v elif n in attrs: new_attrs[n] = attrs[n] # we do this after", "to set. \"\"\" class Meta: \"\"\"Meta that holds metadata for model \"\"\" #", "db): \"\"\"Pre-save hook. Override to run pre save cleanup. Args: db (DB): db", "def _post_insert_(self, db): \"\"\"Pre-insert hook. Override to run post insert cleanup. Args: db", "type(v) if not given_type is required_type: raise TypeError(f\"Invalid type {given_type} given for attribute", "raise AttributeError(f'Can not set field `{k}`. It is excluded using either exclude_fields_up or", "return False return True def _is_valid_down_key_(self, k: str) -> bool: \"\"\"Returns True if", "self.__class__._is_valid_up_(k, v): if k in self.Meta._fromdb_: fields[k]._ignore_first_change_count_ = True self.Meta._fromdb_.remove(k) fields[k].value = v", "here, not in meta.Meta # meta.Meta is used in client Models, thus everything", "return self.Meta.db_table def _is_abstract_(self) -> bool: \"\"\"Whether it's an abstract model or not", "__delattr__(self, k): raise NotImplementedError(\"You can not delete model attributes outside model definition.\") def", "= type(data)() for k,v in data.items(): if not self._is_valid_key_(k, fields, exclude_fields): continue if", "class _Meta_(mt.Meta): pass meta = attrs.pop('Meta', _Meta_) if not inspect.isclass(meta): #TEST: Meta is", "mutable=False, inherit=True, internal=False): try: given_value = getattr(meta, k) if internal: raise ValueError(f\"'{k}' is", "delete cleanup. Args: db (DB): db handle. \"\"\" pass async def _pre_insert_(self, db):", "setattr(self, k, v) def __iter__(self): \"\"\"Iter through k, v where k is field", "models. Do not inherit from this class, use Model instead. Raises: TypeError: When", "_is_proxy_(self) -> bool: \"\"\"Whether it is a proxy model or not \"\"\" return", "up (bool, optional): whether up (data update) or down (data retrieval). Defaults to", "_post_delete_(self, db): \"\"\"Pre-delete hook. Override to run post delete cleanup. Args: db (DB):", "this class, use Model instead. Raises: TypeError: When invalid type is encountered AttributeError:", "self.Meta.db_table def _is_abstract_(self) -> bool: \"\"\"Whether it's an abstract model or not \"\"\"", "= self.Meta.exclude_fields_down all_fields = self._get_all_fields_() for k in all_fields: if not self._is_valid_key_(k, fields,", "for k,v in data.items(): if not self._is_valid_key_(k, fields, exclude_fields): continue if not self._is_valid_value_(k,", "post delete cleanup. Args: db (DB): db handle. 
\"\"\" pass async def _pre_insert_(self,", "\"\"\" if n in self.Meta._field_defs_: return n else: raise AttributeError(f\"No such field `{n}`", "first: ```python from morm.model import Model from morm.datetime import timestamp class Base(Model): class", "names are reserved for predefined methods.') self.__dict__[k] = v return fields = self.Meta._fields_", "str, bases: tuple, attrs: dict): # Ensure initialization is only performed for subclasses", "self.__class__._is_valid_down_(k, v): return v raise AttributeError(f'Invalid attempt to access field `{k}`. It is", "class Meta: db_table = 'myapp_user' abstract = False # default is False proxy", "use Model instead. Raises: TypeError: When invalid type is encountered AttributeError: When misspelled", "Or it does not have any valid value.') raise AttributeError def __setattr__(self, k:", "TIME ZONE', sql_onadd='NOT NULL', sql_alter=('ALTER TABLE \"{table}\" ALTER COLUMN \"{column}\" SET DEFAULT NOW()',))", "v) if self.__class__._is_valid_up_(k, v): if k in self.Meta._fromdb_: fields[k]._ignore_first_change_count_ = True self.Meta._fromdb_.remove(k) fields[k].value", "define new field: {n}\") v.name = n # v.sql_conf.conf['table_name'] = meta_attrs['db_table'] # Field", "\"\"\" class Meta: # The following needs to be defined here, not in", "run pre update cleanup. Args: db (DB): db handle. \"\"\" pass async def", "# safe from spelling mistake f.profession: 'Teacher', # safe from spelling mistake 'hobby':", "<gh_stars>1-10 \"\"\"Model. \"\"\" __author__ = '<NAME> <<EMAIL>>' __copyright__ = 'Copyright © <NAME> <https://github.com/neurobin/>'", "k.startswith('_'): if k.endswith('_'): raise AttributeError('_<name>_ such names are reserved for predefined methods.') self.__dict__[k]", "following needs to be defined here, not in meta.Meta # meta.Meta is used", "= {} _fromdb_: List[str] = [] # super(ModelBase, self).__setattr__('Meta', Meta) self.__dict__['Meta'] = Meta", "NotImplementedError class ModelType(type): Meta: typing.ClassVar # fixing mypy error: \"ModelType\" has no attribute", "else: _set_meta_attr('abstract', False, inherit=False) if meta_attrs['abstract']: meta_attrs['db_table'] = Void else: _set_meta_attr('db_table', class_name, inherit=False)", "blindly inherited, while these are passed # through the metaclasses __new__ methods and", "else raise AttributeError Args: n (str): field name Raises: AttributeError: if field name", "to run pre update cleanup. Args: db (DB): db handle. \"\"\" pass async", "f.name: '<NAME>', # safe from spelling mistake f.profession: 'Teacher', # safe from spelling", "'{class_name}'\") _class_ = super().__new__(mcs, 'x_' + class_name, parents, attrs) BaseMeta = getattr(_class_, 'Meta',", "{given_type} given for attribute '{k}' in class '{class_name}.Meta'. Required {required_type}.\") meta_attrs[k] = given_value", "attribute '{k}' in class '{class_name}.Meta'. Required {required_type}.\") meta_attrs[k] = given_value except AttributeError: if", "criteria. Args: data (dict): data to be validated. up (bool, optional): whether up", "keyword argument\") for k,v in arg_items: setattr(self, k, v) for k,v in kwargs.items():", "to be defined here, not in meta.Meta # meta.Meta is used in client", "inherit from this class, use Model instead. 
Raises: TypeError: When invalid type is", "= self._get_all_fields_() for k in all_fields: if not self._is_valid_key_(k, fields, exclude_keys): continue yield", "valid considering include/exclude up keys \"\"\" return self._is_valid_key_(k, self.Meta.fields_up, self.Meta.exclude_fields_up) def _is_valid_value_(self, k:", "3.7 for n, v in _class_.__dict__.items(): if isinstance(v, Field): if n.startswith('_'): raise AttributeError(f\"Invalid", "for the key is valid considering exclude values \"\"\" if v is Void:", "self.Meta._fromdb_.remove(k) fields[k].value = v elif k in self.Meta._fromdb_: self.Meta._fromdb_.remove(k) else: raise AttributeError(f'Can not", "pairs from data. Validity is checked against include/exclude key/value criteria. Args: data (dict):", "class_name, inherit=False) new_attrs = {} # dict is ordered, officially from python 3.7", "_pre_delete_(self, db): \"\"\"Pre-delete hook. Override to run pre delete cleanup. Args: db (DB):", "access field `{k}`. It is excluded using either exclude_fields_down or exclude_values_down in {self.__class__.__name__}", "'id' abstract = True # postgresql example id = Field('SERIAL', sql_onadd='PRIMARY KEY NOT", "AttributeError(f\"No such field '{n}' in model '{class_name}'\") meta_attrs['f'] = _FieldNames(_get_field_name) MetaClass = mt.MetaType('Meta',", "# v = fields[k].clean(v) # super().__setattr__(k, v) if self.__class__._is_valid_up_(k, v): if k in", "It's more than a good practice to define a Base model first: ```python", "is defaulted to 'id' abstract = True # postgresql example id = Field('SERIAL',", "Returns: str: field name \"\"\" if n in self.Meta._field_defs_: return n else: raise", "-> str: \"\"\"Get db table name for model \"\"\" return self.Meta.db_table def _is_abstract_(self)", "= Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', value=timestamp) ``` Then a minimal model", "not exist Returns: str: field name \"\"\" if n in self.Meta._field_defs_: return n", "values \"\"\" return self._is_valid_value_(k, v, self.Meta.exclude_values_up) def _is_valid_down_value_(self, k: str, v: Any) ->", "v in exclude_values['']: return False return True def _is_valid_up_value_(self, k: str, v: Any)", "could look like this: ```python class User(Base): name = Field('varchar(65)') email = Field('varchar(255)')", "pass async def _pre_insert_(self, db): \"\"\"Pre-insert hook. Override to run pre insert cleanup.", "run pre save cleanup. Args: db (DB): db handle. \"\"\" pass async def", "o.startswith('-'): direction = 'DESC' o = o[1:] elif o.startswith('+'): o = o[1:] o", "fields: raise AttributeError(f\"No such field ('{k}') in model '{self.__class__.__name__}''\") # v = fields[k].clean(v)", "ItemDoesNotExistError from morm.fields.field import Field, FieldValue from morm.types import Void import morm.meta as", "could look like: ```python import random def get_rand(): return random.randint(1, 9) class User(Base):", "(bool, optional): whether up (data update) or down (data retrieval). Defaults to False.", "proxy = False ordering = () fields_up = () fields_down = () exclude_fields_up", "valid base or super base non-proxy model\") else: _set_meta_attr('abstract', False, inherit=False) if meta_attrs['abstract']:", "update) or down (data retrieval). Defaults to False. 
Yields: Iterator[Tuple[str, Any]]: Yields key,", "reprs = [] for k, v in self: reprs.append(f'{k}={repr(v)}') body = ', '.join(reprs)", "() fields_up = () fields_down = () exclude_fields_up = () exclude_fields_down = ()", "OrderedDict import copy from abc import ABCMeta from asyncpg import Record # type:", "cleanup. Args: db (DB): db handle. \"\"\" pass async def _pre_update_(self, db): \"\"\"Pre-update", "= BaseMeta.db_table meta_attrs['abstract'] = BaseMeta.abstract except AttributeError: raise TypeError(f\"This model '{class_name}' can not", "for n, v in _class_.__dict__.items(): if isinstance(v, Field): if n.startswith('_'): raise AttributeError(f\"Invalid field", "the metaclasses __new__ methods and processed accordingly # to determine which one should", "= copy.deepcopy(v) else: meta_attrs[k] = v _set_meta_attr('proxy', False) _set_meta_attr('pk', 'id') _set_meta_attr('ordering', ()) _set_meta_attr('fields_up',", "keys \"\"\" return self._is_valid_key_(k, self.Meta.fields_up, self.Meta.exclude_fields_up) def _is_valid_value_(self, k: str, v: Any, exclude_values:", "of keys and values. Example: ```python User(name='<NAME>', profession='Teacher') User({'name': '<NAME>', 'profession': 'Teacher'}) User({'name':", "self.Meta.exclude_fields_up) def _is_valid_value_(self, k: str, v: Any, exclude_values: Dict[str, Tuple[Any]]) -> bool: \"\"\"Returns", "Dictionary of all fields \"\"\" return self.Meta._field_defs_ def _check_field_name_(self, n: str) -> str:", "Field]: Dictionary of all fields \"\"\" return self.Meta._field_defs_ def _check_field_name_(self, n: str) ->", "to define a Base model first: ```python from morm.model import Model from morm.datetime", "handle. \"\"\" pass async def _post_insert_(self, db): \"\"\"Pre-insert hook. Override to run post", "# type: ignore from morm.exceptions import ItemDoesNotExistError from morm.fields.field import Field, FieldValue from", "for down (data retrieval) \"\"\" return self._is_valid_down_key_(k) and self._is_valid_down_value_(k, v) def _is_valid_up_(self, k:", "n else: raise AttributeError(f\"No such field '{n}' in model '{class_name}'\") meta_attrs['f'] = _FieldNames(_get_field_name)", "default=get_rand) # function can be default ``` ## Initialize a model instance keyword", "'hobby': 'Gardenning', # unsafe from spelling mistake } ``` \"\"\" class Meta: #", "type is encountered AttributeError: When misspelled fields are tried to set. \"\"\" class", "value for the key is valid considering exclude down values \"\"\" return self._is_valid_value_(k,", "v, self.Meta.exclude_values_down) def _is_valid_down_(self, k: str, v: Any) -> bool: \"\"\"Check whether the", "= self.Meta.fields_up exclude_fields = self.Meta.exclude_fields_up else: exclude_values = self.Meta.exclude_values_down fields = self.Meta.fields_down exclude_fields", "= func def __getattr__(self, k): return self.__dict__['func'](k) def __setattr__(self, k, v): raise NotImplementedError", "to run post insert cleanup. Args: db (DB): db handle. 
\"\"\" pass async", "valid value.') raise AttributeError def __setattr__(self, k: str, v): if k == 'Meta':", "key and value is valid for up (data update) \"\"\" return self._is_valid_up_key_(k) and", "spelling mistake 'hobby': 'Gardenning', # unsafe from spelling mistake } ``` \"\"\" class", "fields_down = () exclude_fields_up = () exclude_fields_down = () exclude_values_up = {'':()} exclude_values_down", "attributes outside model definition.\") def _is_valid_key_(self, k:str, fields:Tuple[str], exclude_keys:Tuple[str]) -> bool: \"\"\"Returns True", "def __delattr__(self, k): fields = self.Meta._fields_ if k in fields: fields[k].delete_value() else: super().__delattr__(k)", "Any) -> bool: \"\"\"Returns True if the value for the key is valid", "argument\") for k,v in arg_items: setattr(self, k, v) for k,v in kwargs.items(): setattr(self,", "ABCMeta from asyncpg import Record # type: ignore from morm.exceptions import ItemDoesNotExistError from", "False ordering = () fields_up = () fields_down = () exclude_fields_up = ()", "hook. Override to run post delete cleanup. Args: db (DB): db handle. \"\"\"", "run post delete cleanup. Args: db (DB): db handle. \"\"\" pass async def", "arguments initialize corresponding fields according to the keys. Positional arguments must be dictionaries", "is provided. ## Special Model Meta attribute `f`: You can access field names", "_fields_: Dict[str, FieldValue] = {} _fromdb_: List[str] = [] # super(ModelBase, self).__setattr__('Meta', Meta)", "import ItemDoesNotExistError from morm.fields.field import Field, FieldValue from morm.types import Void import morm.meta", "misspell the name, you will get `AttributeError`. ```python f = User.Meta.f my_data =", "= Field('varchar(255)', default='Unknown') random = Field('integer', default=get_rand) # function can be default ```", "raise TypeError(f\"Invalid argument type ({type(arg)}) to Model __init__ method. Expected: dictionary or keyword", "model parsed and converted to column, direction direction is either `ASC` or `DESC`", "\"\"\"Returns True if the key is valid considering include/exclude up keys \"\"\" return", "self.Meta.fields_up exclude_keys = self.Meta.exclude_fields_up else: fields = self.Meta.fields_down exclude_keys = self.Meta.exclude_fields_down all_fields =", "# mutable values can be changed by other class meta change if mutable:", "type of argument is provided. ## Special Model Meta attribute `f`: You can", "Void import morm.meta as mt # for internal use # morm.db must not", "if k.startswith('_'): if k.endswith('_'): raise AttributeError('_<name>_ such names are reserved for predefined methods.')", "and processed accordingly # to determine which one should be inherited and which", "set model attributes outside model definition.\") def __delattr__(self, k): raise NotImplementedError(\"You can not", "on model without applying any restriction. Returns: Dict[str, Field]: Dictionary of all fields", "are trying to set an invalid value: {v}') def __repr__(self): reprs = []", "up criteria or down criteria. Defaults to False (down). Yields: str: field name", "safe from spelling mistake f.profession: 'Teacher', # safe from spelling mistake 'hobby': 'Gardenning',", "is checked against include/exclude key/value criteria. Args: data (dict): data to be validated.", "not be imported here. Meta = mt.Meta # For client use class _FieldNames():", "self.Meta._fromdb_.remove(k) else: raise AttributeError(f'Can not set field `{k}`. 
It is excluded using either", "default ``` ## Initialize a model instance keyword arguments initialize corresponding fields according", "where k is field name and v is field value Yields: tuple: field_name,", "handle. \"\"\" pass async def _post_delete_(self, db): \"\"\"Pre-delete hook. Override to run post", "from abc import ABCMeta from asyncpg import Record # type: ignore from morm.exceptions", "mistake 'hobby': 'Gardenning', # unsafe from spelling mistake } ``` \"\"\" class Meta:", "morm.db must not be imported here. Meta = mt.Meta # For client use", "k, v # else: # new_data[k] = v # if not gen: #", "COLUMN \"{column}\" SET DEFAULT NOW()',)) updated_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL',", "and values. Example: ```python User(name='<NAME>', profession='Teacher') User({'name': '<NAME>', 'profession': 'Teacher'}) User({'name': '<NAME>', 'profession':", "raise AttributeError('_<name>_ such names are reserved for predefined methods.') self.__dict__[k] = v return", "class ModelType(type): Meta: typing.ClassVar # fixing mypy error: \"ModelType\" has no attribute \"Meta\"", "[] for k, v in self: reprs.append(f'{k}={repr(v)}') body = ', '.join(reprs) return f'{self.__class__.__name__}({body})'", "and it gets inherited. meta_attrs['_field_defs_'][n] = v elif n in attrs: new_attrs[n] =", "Meta._fields_ if k in fields: v = fields[k].value if self.__class__._is_valid_down_(k, v): return v", "`{n}` in model `{self.__name__}`\") def _get_fields_(self, up=False) -> Iterator[str]: \"\"\"Yields field names that", "Yields column, direction \"\"\" ordering = self.Meta.ordering direction = 'ASC' for o in", "def _is_valid_down_(self, k: str, v: Any) -> bool: \"\"\"Check whether the key and", "`{k}`. It is excluded using either exclude_fields_up or exclude_values_up in {self.__class__.__name__} Meta class.", "if up: # exclude_values = self.Meta.exclude_values_up # else: # exclude_values = self.Meta.exclude_values_down #", "does not exist Returns: str: field name \"\"\" if n in self.Meta._field_defs_: return", "postgresql example id = Field('SERIAL', sql_onadd='PRIMARY KEY NOT NULL') created_at = Field('TIMESTAMP WITH", "field_name, field_value \"\"\" for k, f in self.Meta._fields_.items(): if self.__class__._is_valid_down_(k, f.value): yield k,", "definition.\") def __delattr__(self, k): raise NotImplementedError(\"You can not delete model attributes outside model", "str, v: Any) -> bool: \"\"\"Returns True if the value for the key", "defined here, not in meta.Meta # meta.Meta is used in client Models, thus", "with underscore.\") if meta_attrs['proxy'] and n in attrs: raise ValueError(f\"Proxy model '{class_name}' can", "k, v) # mutable values can be changed by other class meta change", "= Field('varchar(255)') password = Field('varchar(255)') ``` An advanced model could look like: ```python", "isinstance(v, Field): if n.startswith('_'): raise AttributeError(f\"Invalid field name '{n}' in model '{class_name}'. \\", "FieldValue] = {} _fromdb_: List[str] = [] # super(ModelBase, self).__setattr__('Meta', Meta) self.__dict__['Meta'] =", "cleanup. Args: db (DB): db handle. \"\"\" pass async def _pre_insert_(self, db): \"\"\"Pre-insert", "by other models. 
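
# Illustrative note on ordering (an assumed example, not library code):
# with Meta.ordering = ('-created_at', 'name') and quote '"',
# ModelClass._get_ordering_('"') yields ('"created_at"', 'DESC') and then
# ('"name"', 'ASC'), ready to be joined into an SQL ORDER BY clause.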

class ModelBase(metaclass=ModelType):
    """Base Model for all models.

    Do not inherit from this class, use Model instead.

    Raises:
        TypeError: When an invalid type is encountered
        AttributeError: When misspelled fields are tried to be set.
    """
    class Meta:
        """Meta that holds metadata for the model.
        """
        # The following needs to be defined here, not in meta.Meta.
        # meta.Meta is used in client Models, thus everything
        # included there will be blindly inherited, while these are passed
        # through the metaclass's __new__ method and processed accordingly
        # to determine which one should be inherited and which one should not.
        pk = 'id'
        '''Primary key'''
        db_table = Void
        abstract = True
        proxy = False
        ordering = ()
        fields_up = ()
        fields_down = ()
        exclude_fields_up = ()
        exclude_fields_down = ()
        exclude_values_up = {'': ()}
        exclude_values_down = {'': ()}

        # internal
        _field_defs_: Dict[str, Field]
        _fields_: Dict[str, FieldValue]
        _fromdb_: List[str]

    def __init__(self, *args, **kwargs):
        class Meta:
            _fields_: Dict[str, FieldValue] = {}
            _fromdb_: List[str] = []
        # super(ModelBase, self).__setattr__('Meta', Meta)
        self.__dict__['Meta'] = Meta
        for k, v in self.__class__.Meta._field_defs_.items():
            self.Meta._fields_[k] = FieldValue(v)
        for arg in args:
            try:
                arg_items = arg.items()
            except AttributeError:
                raise TypeError(f"Invalid argument type ({type(arg)}) to Model __init__ method. Expected: dictionary or keyword argument")
            for k, v in arg_items:
                setattr(self, k, v)
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __iter__(self):
        """Iterate through k, v where k is the field name and v is the field value.

        Yields:
            tuple: field_name, field_value
        """
        for k, f in self.Meta._fields_.items():
            if self.__class__._is_valid_down_(k, f.value):
                yield k, f.value

    def __delattr__(self, k):
        fields = self.Meta._fields_
        if k in fields:
            fields[k].delete_value()
        else:
            super().__delattr__(k)

    def __getattr__(self, k):
        Meta = self.__dict__['Meta']
        fields = Meta._fields_
        if k in fields:
            v = fields[k].value
            if self.__class__._is_valid_down_(k, v):
                return v
            raise AttributeError(f'Invalid attempt to access field `{k}`. It is excluded using either exclude_fields_down or exclude_values_down in {self.__class__.__name__} Meta class. Or it does not have any valid value.')
        raise AttributeError(f"No such attribute '{k}' in model '{self.__class__.__name__}'")

    def __setattr__(self, k: str, v):
        if k == 'Meta':
            raise AttributeError(f"Name '{k}' is reserved. You should not try to change it.")
        if k.startswith('_'):
            if k.endswith('_'):
                raise AttributeError('_<name>_ such names are reserved for predefined methods.')
            self.__dict__[k] = v
            return
        fields = self.Meta._fields_
        if k not in fields:
            raise AttributeError(f"No such field ('{k}') in model '{self.__class__.__name__}'")
        # v = fields[k].clean(v)
        # super().__setattr__(k, v)
        if self.__class__._is_valid_up_(k, v):
            if k in self.Meta._fromdb_:
                fields[k]._ignore_first_change_count_ = True
                self.Meta._fromdb_.remove(k)
            fields[k].value = v
        elif k in self.Meta._fromdb_:
            self.Meta._fromdb_.remove(k)
        else:
            raise AttributeError(f'Can not set field `{k}`. It is excluded using either exclude_fields_up or exclude_values_up in {self.__class__.__name__} Meta class. Or you are trying to set an invalid value: {v}')

    def __repr__(self):
        reprs = []
        for k, v in self:
            reprs.append(f'{k}={repr(v)}')
        body = ', '.join(reprs)
        return f'{self.__class__.__name__}({body})'

    async def _pre_save_(self, db):
        """Pre-save hook. Override to run pre-save cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_save_(self, db):
        """Post-save hook. Override to run post-save cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _pre_delete_(self, db):
        """Pre-delete hook. Override to run pre-delete cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_delete_(self, db):
        """Post-delete hook. Override to run post-delete cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _pre_insert_(self, db):
        """Pre-insert hook. Override to run pre-insert cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_insert_(self, db):
        """Post-insert hook. Override to run post-insert cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _pre_update_(self, db):
        """Pre-update hook. Override to run pre-update cleanup.

        Args:
            db (DB): db handle.
        """
        pass

    async def _post_update_(self, db):
        """Post-update hook. Override to run post-update cleanup.

        Args:
            db (DB): db handle.
        """
        pass
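
# Hook usage sketch (hypothetical example; the points at which the db
# handle actually invokes these hooks live in morm.db, not here, and
# `timestamp` is assumed to be a callable as in the docstring below):
#
#     class Article(Model):
#         ...
#         async def _pre_save_(self, db):
#             # e.g. refresh a timestamp before every save
#             self.updated_at = timestamp()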

class Model(ModelBase):
    """Base model to be inherited by other models.

    It's more than a good practice to define a Base model first:

    ```python
    from morm.model import Model
    from morm.fields.field import Field
    from morm.datetime import timestamp

    class Base(Model):
        class Meta:
            pk = 'id'  # setting the primary key, it is defaulted to 'id'
            abstract = True

        # postgresql example
        id = Field('SERIAL', sql_onadd='PRIMARY KEY NOT NULL')
        created_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL',
                           sql_alter=('ALTER TABLE "{table}" ALTER COLUMN "{column}" SET DEFAULT NOW()',))
        updated_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', value=timestamp)
    ```

    Then a minimal model could look like this:

    ```python
    class User(Base):
        name = Field('varchar(65)')
        email = Field('varchar(255)')
        password = Field('varchar(255)')
    ```

    An advanced model could look like:

    ```python
    import random

    def get_rand():
        return random.randint(1, 9)

    class User(Base):
        class Meta:
            db_table = 'myapp_user'
            abstract = False    # default is False
            proxy = False       # default is False
            # ... etc...
            # see morm.meta.Meta for supported meta attributes.

        name = Field('varchar(65)')
        email = Field('varchar(255)')
        password = Field('varchar(255)')
        profession = Field('varchar(255)', default='Unknown')
        random = Field('integer', default=get_rand)  # a function can be a default
    ```

    ## Initialize a model instance

    Keyword arguments initialize the corresponding fields according to the keys.

    Positional arguments must be dictionaries of keys and values.

    Example:

    ```python
    User(name='<NAME>', profession='Teacher')
    User({'name': '<NAME>', 'profession': 'Teacher'})
    User({'name': '<NAME>', 'profession': 'Teacher'}, age=34)
    User({'name': '<NAME>', 'profession': 'Teacher', 'active': True}, age=34)
    ```

    Raises:
        TypeError: If an invalid type of argument is provided.

    ## Special Model Meta attribute `f`:

    You can access field names from `ModelClass.Meta.f`.

    This allows a spell-safe way to write the field names. If you misspell
    the name, you will get `AttributeError`.

    ```python
    f = User.Meta.f
    my_data = {
        f.name: '<NAME>',           # safe from spelling mistakes
        f.profession: 'Teacher',    # safe from spelling mistakes
        'hobby': 'Gardening',       # unsafe from spelling mistakes
    }
    ```
    """
    class Meta:
        # The following needs to be defined here, not in meta.Meta.
        # meta.Meta is used in client Models, thus everything
        # included there will be blindly inherited, while these are passed
        # through the metaclass's __new__ method and processed accordingly
        # to determine which one should be inherited and which one should not.
        abstract = True

    def __init__(self, *args, **kwargs):
        super(Model, self).__init__(*args, **kwargs)
pk = 'id' '''Primary key''' db_table = Void abstract = True", "() exclude_fields_down = () exclude_values_up = {'':()} exclude_values_down = {'':()} #internal _field_defs_: Dict[str,", "f.value def __delattr__(self, k): fields = self.Meta._fields_ if k in fields: fields[k].delete_value() else:", "None: new_attrs['__classcell__'] = classcell return super().__new__(mcs, class_name, bases, new_attrs) def __setattr__(self, k, v):", "()) _set_meta_attr('exclude_fields_down', ()) _set_meta_attr('exclude_values_up', {'':()}, mutable=True) _set_meta_attr('exclude_values_down', {'':()}, mutable=True) _set_meta_attr('_field_defs_', {}, internal=True, mutable=True)", "_get_all_fields_(self) -> Dict[str, Field]: \"\"\"Get all fields on model without applying any restriction.", "'active': True}, age=34) ``` Raises: TypeError: If invalid type of argument is provided.", "__version__ = '0.0.1' import inspect import typing from typing import Optional, Dict, List,", "misspelled fields are tried to set. \"\"\" class Meta: \"\"\"Meta that holds metadata", "Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL', value=timestamp) ``` Then a minimal model could", "Model __init__ method. Expected: dictionary or keyword argument\") for k,v in arg_items: setattr(self,", "profession='Teacher') User({'name': '<NAME>', 'profession': 'Teacher'}) User({'name': '<NAME>', 'profession': 'Teacher'}, age=34) User({'name': '<NAME>', 'profession':", "Model from morm.datetime import timestamp class Base(Model): class Meta: pk = 'id' #", "typing.ClassVar # fixing mypy error: \"ModelType\" has no attribute \"Meta\" def __new__(mcs, class_name:", "v) for k,v in kwargs.items(): setattr(self, k, v) def __iter__(self): \"\"\"Iter through k,", "\"\"\"Pre-update hook. Override to run post update cleanup. Args: db (DB): db handle.", "for k,v in arg_items: setattr(self, k, v) for k,v in kwargs.items(): setattr(self, k,", "return v raise AttributeError(f'Invalid attempt to access field `{k}`. It is excluded using", "handle. \"\"\" pass class Model(ModelBase): \"\"\"Base model to be inherited by other models.", "import OrderedDict import copy from abc import ABCMeta from asyncpg import Record #", "is restricted as a class raise TypeError(f\"Name 'Meta' is reserved for a class", "not inherit from this class, use Model instead. Raises: TypeError: When invalid type", "'Gardenning', # unsafe from spelling mistake } ``` \"\"\" class Meta: # The", "hook. Override to run pre update cleanup. Args: db (DB): db handle. \"\"\"", "= o[1:] o = f\"{quote}{o}{quote}\" yield o, direction class ModelBase(metaclass=ModelType): \"\"\"Base Model for", "'Teacher', 'active': True}, age=34) ``` Raises: TypeError: If invalid type of argument is", "db handle. \"\"\" pass async def _post_save_(self, db): \"\"\"Pre-save hook. Override to run", "Model for all models. Do not inherit from this class, use Model instead.", "= '[BSD](http://www.opensource.org/licenses/bsd-license.php)' __version__ = '0.0.1' import inspect import typing from typing import Optional,", "determine which one should be inherited and which one should not. pk =", "return self.__dict__['func'](k) def __setattr__(self, k, v): raise NotImplementedError class ModelType(type): Meta: typing.ClassVar #", "cleanup. Args: db (DB): db handle. 
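# A stripped-down, self-contained sketch of the pattern ModelType implements
# above: a metaclass harvesting Field-like class attributes into a registry.
# The names here (TinyField, TinyModelType, TinyUser) are illustrative only;
# they are not part of morm.
class TinyField:
    def __init__(self, sql_type):
        self.sql_type = sql_type
        self.name = None  # filled in by the metaclass

class TinyModelType(type):
    def __new__(mcs, name, bases, attrs):
        field_defs = {}
        for n, v in list(attrs.items()):
            if isinstance(v, TinyField):
                if n.startswith('_'):
                    raise AttributeError(f"Invalid field name '{n}'")
                v.name = n          # record the attribute name on the field
                field_defs[n] = v   # collect into the class-level registry
        cls = super().__new__(mcs, name, bases, attrs)
        cls._field_defs_ = field_defs
        return cls

class TinyUser(metaclass=TinyModelType):
    name = TinyField('varchar(65)')
    email = TinyField('varchar(255)')

# Definition order is preserved, as with morm's Meta._field_defs_.
assert list(TinyUser._field_defs_) == ['name', 'email']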
\"\"\" pass async def _post_update_(self, db): \"\"\"Pre-update", "def _is_valid_up_(self, k: str, v: Any) -> bool: \"\"\"Check whether the key and", "-> bool: \"\"\"Returns True if the value for the key is valid considering", "whether up (data update) or down (data retrieval). Defaults to False. Yields: Iterator[Tuple[str,", "classcell = attrs.pop('__classcell__', None) class _Meta_(mt.Meta): pass meta = attrs.pop('Meta', _Meta_) if not", "getattr(meta, k) if internal: raise ValueError(f\"'{k}' is a reserved attribute for class Meta.", "async def _post_delete_(self, db): \"\"\"Pre-delete hook. Override to run post delete cleanup. Args:", "_get_FieldValue_data_valid_(self, data: dict, up=False) -> Iterator[Tuple[str, Any]]: \"\"\"Yields valid key,value pairs from data.", "access field names from `ModelClass.Meta.f`. This allows a spell-safe way to write the", "-> bool: \"\"\"Check whether the key and value is valid for down (data", "v) # mutable values can be changed by other class meta change if", "if n in self.Meta._field_defs_: return n else: raise AttributeError(f\"No such field `{n}` in", "User(name='<NAME>', profession='Teacher') User({'name': '<NAME>', 'profession': 'Teacher'}) User({'name': '<NAME>', 'profession': 'Teacher'}, age=34) User({'name': '<NAME>',", "finalizing meta_attr def _get_field_name(n: str) -> str: if n in meta_attrs['_field_defs_']: return n", "mutable=True) _set_meta_attr('_field_defs_', {}, internal=True, mutable=True) if meta_attrs['proxy']: #proxy model inherits everything try: meta_attrs['db_table']", "v) def _is_valid_up_(self, k: str, v: Any) -> bool: \"\"\"Check whether the key", "= self.Meta.exclude_fields_up else: exclude_values = self.Meta.exclude_values_down fields = self.Meta.fields_down exclude_fields = self.Meta.exclude_fields_down #", "(data retrieval) \"\"\" return self._is_valid_down_key_(k) and self._is_valid_down_value_(k, v) def _is_valid_up_(self, k: str, v:", "attrs: raise ValueError(f\"Proxy model '{class_name}' can not define new field: {n}\") v.name =", "class_name: str, bases: tuple, attrs: dict): # Ensure initialization is only performed for", "except AttributeError: if inherit: v = getattr(BaseMeta, k, v) # mutable values can", "is valid considering exclude down values \"\"\" return self._is_valid_value_(k, v, self.Meta.exclude_values_down) def _is_valid_down_(self,", "= super().__new__(mcs, 'x_' + class_name, parents, attrs) BaseMeta = getattr(_class_, 'Meta', _Meta_) meta_attrs", "v in self.__class__.Meta._field_defs_.items(): self.Meta._fields_[k] = FieldValue(v) for arg in args: try: arg_items =", "return self._is_valid_value_(k, v, self.Meta.exclude_values_up) def _is_valid_down_value_(self, k: str, v: Any) -> bool: \"\"\"Returns", "self.Meta.exclude_fields_down) def _is_valid_up_key_(self, k: str) -> bool: \"\"\"Returns True if the key is", "else: # exclude_values = self.Meta.exclude_values_down # new_data = type(data)() # for k,v in", "performed for subclasses of Model # excluding Model class itself. parents = tuple(b", "from model parsed and converted to column, direction direction is either `ASC` or", "except AttributeError: raise TypeError(f\"This model '{class_name}' can not be a proxy model. 
It", "name \"\"\" return self.Meta.pk def _get_ordering_(self, quote: str) -> Iterator[Tuple[str, str]]: \"\"\"Yield each", "we do this after finalizing meta_attr def _get_field_name(n: str) -> str: if n", "and self._is_valid_up_value_(k, v) def _get_all_fields_(self) -> Dict[str, Field]: \"\"\"Get all fields on model", "True if the key is valid considering include/exclude keys \"\"\" if k in", "_is_valid_up_value_(self, k: str, v: Any) -> bool: \"\"\"Returns True if the value for", "for k, v in self: reprs.append(f'{k}={repr(v)}') body = ', '.join(reprs) return f'{self.__class__.__name__}({body})' async", "morm.meta.Meta for supported meta attributes. name = Field('varchar(65)') email = Field('varchar(255)') password =", "a Base model first: ```python from morm.model import Model from morm.datetime import timestamp", "save cleanup. Args: db (DB): db handle. \"\"\" pass async def _post_delete_(self, db):", "if field name does not exist Returns: str: field name \"\"\" if n", "handle. \"\"\" pass async def _pre_delete_(self, db): \"\"\"Pre-delete hook. Override to run pre", "= self.Meta.fields_up exclude_keys = self.Meta.exclude_fields_up else: fields = self.Meta.fields_down exclude_keys = self.Meta.exclude_fields_down all_fields", "super().__delattr__(k) def __getattr__(self, k): Meta = self.__dict__['Meta'] fields = Meta._fields_ if k in", "if not inspect.isclass(meta): #TEST: Meta is restricted as a class raise TypeError(f\"Name 'Meta'", "in fields: v = fields[k].value if self.__class__._is_valid_down_(k, v): return v raise AttributeError(f'Invalid attempt", "def _pre_delete_(self, db): \"\"\"Pre-delete hook. Override to run pre delete cleanup. Args: db", "fields: return False return True def _is_valid_down_key_(self, k: str) -> bool: \"\"\"Returns True", "not \"\"\" return self.Meta.abstract def _is_proxy_(self) -> bool: \"\"\"Whether it is a proxy", "else: raise AttributeError(f\"No such field '{n}' in model '{class_name}'\") meta_attrs['f'] = _FieldNames(_get_field_name) MetaClass", "for arg in args: try: arg_items = arg.items() except AttributeError: raise TypeError(f\"Invalid argument", "FieldValue(v) for arg in args: try: arg_items = arg.items() except AttributeError: raise TypeError(f\"Invalid", "Override to run post insert cleanup. Args: db (DB): db handle. \"\"\" pass", "Positional arguments must be dictionaries of keys and values. Example: ```python User(name='<NAME>', profession='Teacher')", "the value for the key is valid considering exclude values \"\"\" if v", "k) if internal: raise ValueError(f\"'{k}' is a reserved attribute for class Meta. Error", "\"\"\" return self._is_valid_key_(k, self.Meta.fields_up, self.Meta.exclude_fields_up) def _is_valid_value_(self, k: str, v: Any, exclude_values: Dict[str,", "meta_attrs = {} def _set_meta_attr(k, v, mutable=False, inherit=True, internal=False): try: given_value = getattr(meta,", "not gen: # return new_data def _get_db_table_(self) -> str: \"\"\"Get db table name", "_class_.__dict__.items(): if isinstance(v, Field): if n.startswith('_'): raise AttributeError(f\"Invalid field name '{n}' in model", "internal: raise ValueError(f\"'{k}' is a reserved attribute for class Meta. 
Error in model", "Field, FieldValue from morm.types import Void import morm.meta as mt # for internal", "_set_meta_attr('fields_up', ()) _set_meta_attr('fields_down', ()) _set_meta_attr('exclude_fields_up', ()) _set_meta_attr('exclude_fields_down', ()) _set_meta_attr('exclude_values_up', {'':()}, mutable=True) _set_meta_attr('exclude_values_down', {'':()},", "meta_attrs['f'] = _FieldNames(_get_field_name) MetaClass = mt.MetaType('Meta', (mt.Meta,), meta_attrs) new_attrs['Meta'] = MetaClass if classcell", "all_fields: if not self._is_valid_key_(k, fields, exclude_keys): continue yield k def _get_FieldValue_data_valid_(self, data: dict,", "# Field must not contain table_name, because it is void when model is", "Field('SERIAL', sql_onadd='PRIMARY KEY NOT NULL') created_at = Field('TIMESTAMP WITH TIME ZONE', sql_onadd='NOT NULL',", "a proxy model or not \"\"\" return self.Meta.proxy def _get_pk_(self) -> str: \"\"\"Get", "while these are passed # through the metaclasses __new__ methods and processed accordingly", "class_name, bases, new_attrs) def __setattr__(self, k, v): raise NotImplementedError(\"You can not set model", "k, v): raise NotImplementedError(\"You can not set model attributes outside model definition.\") def", "python 3.7 for n, v in _class_.__dict__.items(): if isinstance(v, Field): if n.startswith('_'): raise", "`ASC` or `DESC` Args: quote (str): Quote to apply to the column Yields:", "up=False, gen=False): # if up: # exclude_values = self.Meta.exclude_values_up # else: # exclude_values" ]
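# The up/down validity checks above reduce to a small amount of pure logic.
# This self-contained sketch (not morm API) mirrors _is_valid_key_ and
# _is_valid_value_: a key passes if it is not excluded and, when an include
# list is given, is listed; a value fails if it appears in the exclusion map
# either globally (under the '' key) or under its own key.
def is_valid_key(k, fields, exclude_keys):
    if k in exclude_keys:
        return False
    if fields and k not in fields:
        return False
    return True

def is_valid_value(k, v, exclude_values):
    if v in exclude_values.get('', ()):
        return False
    if v in exclude_values.get(k, ()):
        return False
    return True

# Example: exclude 'id' on the way up (INSERT/UPDATE) and drop empty values.
assert not is_valid_key('id', (), exclude_keys=('id',))
assert is_valid_key('name', ('name', 'email'), ())
assert not is_valid_value('name', '', {'': ('',)})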
[ "doc='身份证正面') id_card_back = db.Column(db.String, doc='身份证背面') id_card_handheld = db.Column(db.String, doc='手持身份证') ctime = db.Column('create_time', db.DateTime,", "db.Column(db.String, doc='简介') certificate = db.Column(db.String, doc='认证') article_count = db.Column(db.Integer, default=0, doc='发帖数') following_count =", "following_count = db.Column(db.Integer, default=0, doc='关注的人数') fans_count = db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)') like_count = db.Column(db.Integer,", "\"\"\" __tablename__ = 'user_basic' class STATUS: ENABLE = 1 DISABLE = 0 id", "= db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True, doc='用户ID') # id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID')", "doc='头像') last_login = db.Column(db.DateTime, doc='最后登录时间') is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体') is_verified = db.Column(db.Boolean,", "= db.Column(db.String, doc='手持身份证') ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime,", "class RELATION: DELETE = 0 FOLLOW = 1 BLACKLIST = 2 id =", "= db.Column('user_name', db.String, doc='昵称') profile_photo = db.Column(db.String, doc='头像') last_login = db.Column(db.DateTime, doc='最后登录时间') is_media", "default=False, doc='是否实名认证') introduction = db.Column(db.String, doc='简介') certificate = db.Column(db.String, doc='认证') article_count = db.Column(db.Integer,", "= db.Column(db.String, doc='头像') last_login = db.Column(db.DateTime, doc='最后登录时间') is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体') is_verified", "db.Column(db.String, doc='身份证正面') id_card_back = db.Column(db.String, doc='身份证背面') id_card_handheld = db.Column(db.String, doc='手持身份证') ctime = db.Column('create_time',", "default=False, doc='是否是自媒体') is_verified = db.Column(db.Boolean, default=False, doc='是否实名认证') introduction = db.Column(db.String, doc='简介') certificate =", "= db.Column(db.String, doc='简介') certificate = db.Column(db.String, doc='认证') article_count = db.Column(db.Integer, default=0, doc='发帖数') following_count", "db.Column(db.Integer, default=1, doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile', uselist=False) follows = db.relationship('Relation') #", "= db.Column(db.Integer, default=0, doc='发帖数') following_count = db.Column(db.Integer, default=0, doc='关注的人数') fans_count = db.Column(db.Integer, default=0,", "\"\"\" 用户资料表 \"\"\" __tablename__ = 'user_profile' class GENDER: MALE = 0 FEMALE =", "db.Column(db.String, doc='公司') career = db.Column(db.String, doc='职业') class Relation(db.Model): \"\"\" 用户关系表 \"\"\" __tablename__ =", "profile = db.relationship('UserProfile', uselist=False) follows = db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 # profile = db.relationship('UserProfile',", "datetime from flask import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) class", "default=0, doc='性别') birthday = db.Column(db.Date, doc='生日') real_name = db.Column(db.String, doc='真实姓名') id_number = db.Column(db.String,", "FEMALE = 1 # 使用外键ForeignKey来明确两张表的关系 id = db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True, doc='用户ID') #", "flask import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) class MySQLConfig(object): SQLALCHEMY_DATABASE_URI", "SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO = True app.config.from_object(MySQLConfig) # 创建操作数据库的管家 db = SQLAlchemy(app) class", "id_number = db.Column(db.String, 
doc='身份证号') id_card_front = db.Column(db.String, doc='身份证正面') id_card_back = db.Column(db.String, doc='身份证背面') id_card_handheld", "doc='用户ID') # id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') gender = db.Column(db.Integer, default=0, doc='性别')", "primary_key=True, doc='用户ID') gender = db.Column(db.Integer, default=0, doc='性别') birthday = db.Column(db.Date, doc='生日') real_name =", "FROM # user_basic INNER JOIN user_relation ON user_basic.user_id = user_relation.user_id # WHERE user_basic.mobile", "db.Column(db.Integer, default=0, doc='累计点赞人数') read_count = db.Column(db.Integer, default=0, doc='累计阅读人数') account = db.Column(db.String, doc='账号') email", "= db.Column(db.String, doc='身份证背面') id_card_handheld = db.Column(db.String, doc='手持身份证') ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间')", "= db.Column(db.DateTime, doc='注册自媒体时间') area = db.Column(db.String, doc='地区') company = db.Column(db.String, doc='公司') career =", "= db.Column(db.String, doc='真实姓名') id_number = db.Column(db.String, doc='身份证号') id_card_front = db.Column(db.String, doc='身份证正面') id_card_back =", "doc='手机号') password = db.Column(db.String, doc='密码') name = db.Column('user_name', db.String, doc='昵称') profile_photo = db.Column(db.String,", "doc='昵称') profile_photo = db.Column(db.String, doc='头像') last_login = db.Column(db.DateTime, doc='最后登录时间') is_media = db.Column(db.Boolean, default=False,", "= db.Column(db.String, doc='身份证正面') id_card_back = db.Column(db.String, doc='身份证背面') id_card_handheld = db.Column(db.String, doc='手持身份证') ctime =", "id_card_back = db.Column(db.String, doc='身份证背面') id_card_handheld = db.Column(db.String, doc='手持身份证') ctime = db.Column('create_time', db.DateTime, default=datetime.now,", "db.Column(db.Integer, doc='目标用户ID') relation = db.Column(db.Integer, doc='关系') ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime", "用户关系表 \"\"\" __tablename__ = 'user_relation' class RELATION: DELETE = 0 FOLLOW = 1", "Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) class MySQLConfig(object): SQLALCHEMY_DATABASE_URI = \"mysql://root:mysql@127.0.0.1:3306/toutiao\"", "创建操作数据库的管家 db = SQLAlchemy(app) class User(db.Model): \"\"\" 用户基本信息 \"\"\" __tablename__ = 'user_basic' class", "company = db.Column(db.String, doc='公司') career = db.Column(db.String, doc='职业') class Relation(db.Model): \"\"\" 用户关系表 \"\"\"", "'user_profile' class GENDER: MALE = 0 FEMALE = 1 # 使用外键ForeignKey来明确两张表的关系 id =", "default=0, doc='关注的人数') fans_count = db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)') like_count = db.Column(db.Integer, default=0, doc='累计点赞人数') read_count", "0 FOLLOW = 1 BLACKLIST = 2 id = db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID')", "db.Column('user_name', db.String, doc='昵称') profile_photo = db.Column(db.String, doc='头像') last_login = db.Column(db.DateTime, doc='最后登录时间') is_media =", "= Flask(__name__) class MySQLConfig(object): SQLALCHEMY_DATABASE_URI = \"mysql://root:mysql@127.0.0.1:3306/toutiao\" SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO = True", "ON user_basic.user_id = user_relation.user_id # WHERE user_basic.mobile = '13912345678' # User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() # 查询出", "is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体') is_verified = db.Column(db.Boolean, default=False, doc='是否实名认证') introduction = db.Column(db.String,", 
"= db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 # profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False) class UserProfile(db.Model): \"\"\"", "User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() # 查询出 编号为1的用户 被哪些用户关注 用户名 # SELECT user_basic.user_name FROM # user_basic INNER", "= db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') register_media_time", "doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)') #", "# 查询出 编号为1的用户 被哪些用户关注 用户名 # SELECT user_basic.user_name FROM # user_basic INNER JOIN", "db.ForeignKey('user_basic.user_id'), doc='用户ID') target_user_id = db.Column(db.Integer, doc='目标用户ID') relation = db.Column(db.Integer, doc='关系') ctime = db.Column('create_time',", "doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') register_media_time = db.Column(db.DateTime, doc='注册自媒体时间') area", "= db.Column(db.String, doc='公司') career = db.Column(db.String, doc='职业') class Relation(db.Model): \"\"\" 用户关系表 \"\"\" __tablename__", "# 查询出 手机号为13912345678的用户关注了哪些用户 用户id # SELECT user_basic.user_id,user_relation.target_user_id FROM # user_basic INNER JOIN user_relation", "introduction = db.Column(db.String, doc='简介') certificate = db.Column(db.String, doc='认证') article_count = db.Column(db.Integer, default=0, doc='发帖数')", "register_media_time = db.Column(db.DateTime, doc='注册自媒体时间') area = db.Column(db.String, doc='地区') company = db.Column(db.String, doc='公司') career", "from datetime import datetime from flask import Flask from flask_sqlalchemy import SQLAlchemy app", "= 0 id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') mobile = db.Column(db.String, doc='手机号') password", "db.Column(db.String, doc='职业') class Relation(db.Model): \"\"\" 用户关系表 \"\"\" __tablename__ = 'user_relation' class RELATION: DELETE", "= db.relationship('UserProfile', uselist=False) follows = db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 # profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)',", "# profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False) class UserProfile(db.Model): \"\"\" 用户资料表 \"\"\" __tablename__ =", "RELATION: DELETE = 0 FOLLOW = 1 BLACKLIST = 2 id = db.Column('relation_id',", "db.String, doc='昵称') profile_photo = db.Column(db.String, doc='头像') last_login = db.Column(db.DateTime, doc='最后登录时间') is_media = db.Column(db.Boolean,", "db.Column(db.Integer, doc='关系') ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now,", "= 1 BLACKLIST = 2 id = db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID') # user_id", "2 id = db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID') # user_id = db.Column(db.Integer, doc='用户ID') user_id", "= db.Column(db.Integer, default=0, doc='累计阅读人数') account = db.Column(db.String, doc='账号') email = db.Column(db.String, doc='邮箱') status", "import datetime from flask import Flask from 
flask_sqlalchemy import SQLAlchemy app = Flask(__name__)", "user_basic.user_name FROM # user_basic INNER JOIN user_relation # ON user_basic.user_id=user_relation.target_user_id # WHERE user_basic.user_id=1", "id = db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID') # user_id = db.Column(db.Integer, doc='用户ID') user_id =", "= db.Column(db.Boolean, default=False, doc='是否是自媒体') is_verified = db.Column(db.Boolean, default=False, doc='是否实名认证') introduction = db.Column(db.String, doc='简介')", "doc='认证') article_count = db.Column(db.Integer, default=0, doc='发帖数') following_count = db.Column(db.Integer, default=0, doc='关注的人数') fans_count =", "doc='邮箱') status = db.Column(db.Integer, default=1, doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile', uselist=False) follows", "ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间')", "like_count = db.Column(db.Integer, default=0, doc='累计点赞人数') read_count = db.Column(db.Integer, default=0, doc='累计阅读人数') account = db.Column(db.String,", "app.config.from_object(MySQLConfig) # 创建操作数据库的管家 db = SQLAlchemy(app) class User(db.Model): \"\"\" 用户基本信息 \"\"\" __tablename__ =", "datetime import datetime from flask import Flask from flask_sqlalchemy import SQLAlchemy app =", "用户基本信息 \"\"\" __tablename__ = 'user_basic' class STATUS: ENABLE = 1 DISABLE = 0", "\"\"\" 用户关系表 \"\"\" __tablename__ = 'user_relation' class RELATION: DELETE = 0 FOLLOW =", "db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True, doc='用户ID') # id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') gender =", "= 0 FEMALE = 1 # 使用外键ForeignKey来明确两张表的关系 id = db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True,", "db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 # profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False) class UserProfile(db.Model): \"\"\" 用户资料表", "= db.Column(db.String, doc='密码') name = db.Column('user_name', db.String, doc='昵称') profile_photo = db.Column(db.String, doc='头像') last_login", "SELECT user_basic.user_name FROM # user_basic INNER JOIN user_relation # ON user_basic.user_id=user_relation.target_user_id # WHERE", "DISABLE = 0 id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') mobile = db.Column(db.String, doc='手机号')", "DELETE = 0 FOLLOW = 1 BLACKLIST = 2 id = db.Column('relation_id', db.Integer,", "= 2 id = db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID') # user_id = db.Column(db.Integer, doc='用户ID')", "relation = db.Column(db.Integer, doc='关系') ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time',", "db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') register_media_time = db.Column(db.DateTime, doc='注册自媒体时间') area = db.Column(db.String, doc='地区')", "class Relation(db.Model): \"\"\" 用户关系表 \"\"\" __tablename__ = 'user_relation' class RELATION: DELETE = 0", "utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)') # 查询出", "account = db.Column(db.String, doc='账号') email = db.Column(db.String, doc='邮箱') status = db.Column(db.Integer, default=1, doc='状态,是否可用')", "1 # 使用外键ForeignKey来明确两张表的关系 id = 
db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True, doc='用户ID') # id =", "# 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 # profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False) class UserProfile(db.Model): \"\"\" 用户资料表 \"\"\"", "target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)') # 查询出 手机号为13912345678的用户关注了哪些用户 用户id # SELECT user_basic.user_id,user_relation.target_user_id FROM #", "1 BLACKLIST = 2 id = db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID') # user_id =", "doc='累计阅读人数') account = db.Column(db.String, doc='账号') email = db.Column(db.String, doc='邮箱') status = db.Column(db.Integer, default=1,", "db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') target_user = db.relationship('User',", "db.Column(db.String, doc='身份证号') id_card_front = db.Column(db.String, doc='身份证正面') id_card_back = db.Column(db.String, doc='身份证背面') id_card_handheld = db.Column(db.String,", "db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') gender = db.Column(db.Integer, default=0, doc='性别') birthday = db.Column(db.Date, doc='生日')", "= True app.config.from_object(MySQLConfig) # 创建操作数据库的管家 db = SQLAlchemy(app) class User(db.Model): \"\"\" 用户基本信息 \"\"\"", "doc='被关注的人数(粉丝数)') like_count = db.Column(db.Integer, default=0, doc='累计点赞人数') read_count = db.Column(db.Integer, default=0, doc='累计阅读人数') account =", "__tablename__ = 'user_profile' class GENDER: MALE = 0 FEMALE = 1 # 使用外键ForeignKey来明确两张表的关系", "UserProfile(db.Model): \"\"\" 用户资料表 \"\"\" __tablename__ = 'user_profile' class GENDER: MALE = 0 FEMALE", "doc='注册自媒体时间') area = db.Column(db.String, doc='地区') company = db.Column(db.String, doc='公司') career = db.Column(db.String, doc='职业')", "db.Column(db.String, doc='密码') name = db.Column('user_name', db.String, doc='昵称') profile_photo = db.Column(db.String, doc='头像') last_login =", "MALE = 0 FEMALE = 1 # 使用外键ForeignKey来明确两张表的关系 id = db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'),", "用户id # SELECT user_basic.user_id,user_relation.target_user_id FROM # user_basic INNER JOIN user_relation ON user_basic.user_id =", "User(db.Model): \"\"\" 用户基本信息 \"\"\" __tablename__ = 'user_basic' class STATUS: ENABLE = 1 DISABLE", "= db.Column(db.String, doc='职业') class Relation(db.Model): \"\"\" 用户关系表 \"\"\" __tablename__ = 'user_relation' class RELATION:", "read_count = db.Column(db.Integer, default=0, doc='累计阅读人数') account = db.Column(db.String, doc='账号') email = db.Column(db.String, doc='邮箱')", "'user_relation' class RELATION: DELETE = 0 FOLLOW = 1 BLACKLIST = 2 id", "doc='发帖数') following_count = db.Column(db.Integer, default=0, doc='关注的人数') fans_count = db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)') like_count =", "user_relation ON user_basic.user_id = user_relation.user_id # WHERE user_basic.mobile = '13912345678' # User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() #", "查询出 编号为1的用户 被哪些用户关注 用户名 # SELECT user_basic.user_name FROM # user_basic INNER JOIN user_relation", "db.Column(db.Integer, db.ForeignKey('user_basic.user_id'), doc='用户ID') target_user_id = db.Column(db.Integer, doc='目标用户ID') relation = db.Column(db.Integer, doc='关系') ctime =", "doc='密码') name = db.Column('user_name', db.String, doc='昵称') profile_photo = 
db.Column(db.String, doc='头像') last_login = db.Column(db.DateTime,", "= db.Column(db.String, doc='手机号') password = db.Column(db.String, doc='密码') name = db.Column('user_name', db.String, doc='昵称') profile_photo", "doc='最后登录时间') is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体') is_verified = db.Column(db.Boolean, default=False, doc='是否实名认证') introduction =", "Relation(db.Model): \"\"\" 用户关系表 \"\"\" __tablename__ = 'user_relation' class RELATION: DELETE = 0 FOLLOW", "= 1 # 使用外键ForeignKey来明确两张表的关系 id = db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True, doc='用户ID') # id", "= user_relation.user_id # WHERE user_basic.mobile = '13912345678' # User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() # 查询出 编号为1的用户 被哪些用户关注", "db.Column(db.DateTime, doc='最后登录时间') is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体') is_verified = db.Column(db.Boolean, default=False, doc='是否实名认证') introduction", "app = Flask(__name__) class MySQLConfig(object): SQLALCHEMY_DATABASE_URI = \"mysql://root:mysql@127.0.0.1:3306/toutiao\" SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO =", "BLACKLIST = 2 id = db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID') # user_id = db.Column(db.Integer,", "career = db.Column(db.String, doc='职业') class Relation(db.Model): \"\"\" 用户关系表 \"\"\" __tablename__ = 'user_relation' class", "primaryjoin='Relation.target_user_id==foreign(User.id)') # 查询出 手机号为13912345678的用户关注了哪些用户 用户id # SELECT user_basic.user_id,user_relation.target_user_id FROM # user_basic INNER JOIN", "article_count = db.Column(db.Integer, default=0, doc='发帖数') following_count = db.Column(db.Integer, default=0, doc='关注的人数') fans_count = db.Column(db.Integer,", "default=0, doc='发帖数') following_count = db.Column(db.Integer, default=0, doc='关注的人数') fans_count = db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)') like_count", "default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') register_media_time = db.Column(db.DateTime, doc='注册自媒体时间')", "fans_count = db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)') like_count = db.Column(db.Integer, default=0, doc='累计点赞人数') read_count = db.Column(db.Integer,", "'user_basic' class STATUS: ENABLE = 1 DISABLE = 0 id = db.Column('user_id', db.Integer,", "primaryjoin='User.id==foreign(UserProfile.id)', uselist=False) class UserProfile(db.Model): \"\"\" 用户资料表 \"\"\" __tablename__ = 'user_profile' class GENDER: MALE", "= db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)') # 查询出 手机号为13912345678的用户关注了哪些用户", "= db.Column(db.Integer, default=0, doc='性别') birthday = db.Column(db.Date, doc='生日') real_name = db.Column(db.String, doc='真实姓名') id_number", "default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)')", "uselist=False) follows = db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 # profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False) class", "doc='更新时间') register_media_time = db.Column(db.DateTime, doc='注册自媒体时间') area = 
db.Column(db.String, doc='地区') company = db.Column(db.String, doc='公司')", "db = SQLAlchemy(app) class User(db.Model): \"\"\" 用户基本信息 \"\"\" __tablename__ = 'user_basic' class STATUS:", "profile_photo = db.Column(db.String, doc='头像') last_login = db.Column(db.DateTime, doc='最后登录时间') is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体')", "db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID') # user_id = db.Column(db.Integer, doc='用户ID') user_id = db.Column(db.Integer, db.ForeignKey('user_basic.user_id'),", "class MySQLConfig(object): SQLALCHEMY_DATABASE_URI = \"mysql://root:mysql@127.0.0.1:3306/toutiao\" SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO = True app.config.from_object(MySQLConfig) #", "= db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)') # 查询出 手机号为13912345678的用户关注了哪些用户 用户id # SELECT user_basic.user_id,user_relation.target_user_id FROM # user_basic", "id_card_handheld = db.Column(db.String, doc='手持身份证') ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time',", "default=0, doc='累计点赞人数') read_count = db.Column(db.Integer, default=0, doc='累计阅读人数') account = db.Column(db.String, doc='账号') email =", "is_verified = db.Column(db.Boolean, default=False, doc='是否实名认证') introduction = db.Column(db.String, doc='简介') certificate = db.Column(db.String, doc='认证')", "= 0 FOLLOW = 1 BLACKLIST = 2 id = db.Column('relation_id', db.Integer, primary_key=True,", "= db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') register_media_time = db.Column(db.DateTime, doc='注册自媒体时间') area = db.Column(db.String,", "gender = db.Column(db.Integer, default=0, doc='性别') birthday = db.Column(db.Date, doc='生日') real_name = db.Column(db.String, doc='真实姓名')", "area = db.Column(db.String, doc='地区') company = db.Column(db.String, doc='公司') career = db.Column(db.String, doc='职业') class", "db.Column(db.String, doc='头像') last_login = db.Column(db.DateTime, doc='最后登录时间') is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体') is_verified =", "= '13912345678' # User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() # 查询出 编号为1的用户 被哪些用户关注 用户名 # SELECT user_basic.user_name FROM", "用户名 # SELECT user_basic.user_name FROM # user_basic INNER JOIN user_relation # ON user_basic.user_id=user_relation.target_user_id", "default=1, doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile', uselist=False) follows = db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性", "MySQLConfig(object): SQLALCHEMY_DATABASE_URI = \"mysql://root:mysql@127.0.0.1:3306/toutiao\" SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO = True app.config.from_object(MySQLConfig) # 创建操作数据库的管家", "WHERE user_basic.mobile = '13912345678' # User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() # 查询出 编号为1的用户 被哪些用户关注 用户名 # SELECT", "user_basic.mobile = '13912345678' # User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() # 查询出 编号为1的用户 被哪些用户关注 用户名 # SELECT user_basic.user_name", "utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') 
register_media_time = db.Column(db.DateTime, doc='注册自媒体时间') area =", "from flask import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) class MySQLConfig(object):", "follows = db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 # profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False) class UserProfile(db.Model):", "= db.Column(db.String, doc='身份证号') id_card_front = db.Column(db.String, doc='身份证正面') id_card_back = db.Column(db.String, doc='身份证背面') id_card_handheld =", "default=0, doc='被关注的人数(粉丝数)') like_count = db.Column(db.Integer, default=0, doc='累计点赞人数') read_count = db.Column(db.Integer, default=0, doc='累计阅读人数') account", "db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)') # 查询出 手机号为13912345678的用户关注了哪些用户 用户id #", "primary_key=True, doc='用户ID') # id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') gender = db.Column(db.Integer, default=0,", "id_card_front = db.Column(db.String, doc='身份证正面') id_card_back = db.Column(db.String, doc='身份证背面') id_card_handheld = db.Column(db.String, doc='手持身份证') ctime", "编号为1的用户 被哪些用户关注 用户名 # SELECT user_basic.user_name FROM # user_basic INNER JOIN user_relation #", "doc='身份证号') id_card_front = db.Column(db.String, doc='身份证正面') id_card_back = db.Column(db.String, doc='身份证背面') id_card_handheld = db.Column(db.String, doc='手持身份证')", "user_basic.user_id,user_relation.target_user_id FROM # user_basic INNER JOIN user_relation ON user_basic.user_id = user_relation.user_id # WHERE", "onupdate=datetime.now, doc='更新时间') register_media_time = db.Column(db.DateTime, doc='注册自媒体时间') area = db.Column(db.String, doc='地区') company = db.Column(db.String,", "db.Column(db.String, doc='账号') email = db.Column(db.String, doc='邮箱') status = db.Column(db.Integer, default=1, doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性", "db.Column(db.String, doc='邮箱') status = db.Column(db.Integer, default=1, doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile', uselist=False)", "db.Column(db.String, doc='真实姓名') id_number = db.Column(db.String, doc='身份证号') id_card_front = db.Column(db.String, doc='身份证正面') id_card_back = db.Column(db.String,", "= db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') gender = db.Column(db.Integer, default=0, doc='性别') birthday = db.Column(db.Date,", "profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False) class UserProfile(db.Model): \"\"\" 用户资料表 \"\"\" __tablename__ = 'user_profile'", "primary_key=True, doc='主键ID') # user_id = db.Column(db.Integer, doc='用户ID') user_id = db.Column(db.Integer, db.ForeignKey('user_basic.user_id'), doc='用户ID') target_user_id", "db.Column(db.Integer, default=0, doc='发帖数') following_count = db.Column(db.Integer, default=0, doc='关注的人数') fans_count = db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)')", "import SQLAlchemy app = Flask(__name__) class MySQLConfig(object): SQLALCHEMY_DATABASE_URI = \"mysql://root:mysql@127.0.0.1:3306/toutiao\" SQLALCHEMY_TRACK_MODIFICATIONS = False", "查询出 手机号为13912345678的用户关注了哪些用户 用户id # SELECT user_basic.user_id,user_relation.target_user_id FROM # user_basic INNER JOIN user_relation ON", "# 使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile', uselist=False) follows = db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 # profile", "onupdate=datetime.now, 
doc='更新时间') target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)') # 查询出 手机号为13912345678的用户关注了哪些用户 用户id # SELECT user_basic.user_id,user_relation.target_user_id", "db.Column(db.Boolean, default=False, doc='是否实名认证') introduction = db.Column(db.String, doc='简介') certificate = db.Column(db.String, doc='认证') article_count =", "flask_sqlalchemy import SQLAlchemy app = Flask(__name__) class MySQLConfig(object): SQLALCHEMY_DATABASE_URI = \"mysql://root:mysql@127.0.0.1:3306/toutiao\" SQLALCHEMY_TRACK_MODIFICATIONS =", "手机号为13912345678的用户关注了哪些用户 用户id # SELECT user_basic.user_id,user_relation.target_user_id FROM # user_basic INNER JOIN user_relation ON user_basic.user_id", "doc='简介') certificate = db.Column(db.String, doc='认证') article_count = db.Column(db.Integer, default=0, doc='发帖数') following_count = db.Column(db.Integer,", "db.Integer, primary_key=True, doc='用户ID') gender = db.Column(db.Integer, default=0, doc='性别') birthday = db.Column(db.Date, doc='生日') real_name", "doc='公司') career = db.Column(db.String, doc='职业') class Relation(db.Model): \"\"\" 用户关系表 \"\"\" __tablename__ = 'user_relation'", "db.Column(db.String, doc='认证') article_count = db.Column(db.Integer, default=0, doc='发帖数') following_count = db.Column(db.Integer, default=0, doc='关注的人数') fans_count", "doc='地区') company = db.Column(db.String, doc='公司') career = db.Column(db.String, doc='职业') class Relation(db.Model): \"\"\" 用户关系表", "target_user_id = db.Column(db.Integer, doc='目标用户ID') relation = db.Column(db.Integer, doc='关系') ctime = db.Column('create_time', db.DateTime, default=datetime.now,", "= db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID') # user_id = db.Column(db.Integer, doc='用户ID') user_id = db.Column(db.Integer,", "user_basic.user_id = user_relation.user_id # WHERE user_basic.mobile = '13912345678' # User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() # 查询出 编号为1的用户", "\"\"\" __tablename__ = 'user_profile' class GENDER: MALE = 0 FEMALE = 1 #", "# User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() # 查询出 编号为1的用户 被哪些用户关注 用户名 # SELECT user_basic.user_name FROM # user_basic", "db.Integer, primary_key=True, doc='用户ID') mobile = db.Column(db.String, doc='手机号') password = db.Column(db.String, doc='密码') name =", "= \"mysql://root:mysql@127.0.0.1:3306/toutiao\" SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO = True app.config.from_object(MySQLConfig) # 创建操作数据库的管家 db =", "last_login = db.Column(db.DateTime, doc='最后登录时间') is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体') is_verified = db.Column(db.Boolean, default=False,", "db.Column(db.Integer, default=0, doc='性别') birthday = db.Column(db.Date, doc='生日') real_name = db.Column(db.String, doc='真实姓名') id_number =", "使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile', uselist=False) follows = db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 # profile =", "doc='关注的人数') fans_count = db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)') like_count = db.Column(db.Integer, default=0, doc='累计点赞人数') read_count =", "import Flask from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) class MySQLConfig(object): SQLALCHEMY_DATABASE_URI =", "= db.Column(db.String, doc='邮箱') status = db.Column(db.Integer, default=1, 
doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile',", "db.Column(db.Date, doc='生日') real_name = db.Column(db.String, doc='真实姓名') id_number = db.Column(db.String, doc='身份证号') id_card_front = db.Column(db.String,", "0 id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') mobile = db.Column(db.String, doc='手机号') password =", "email = db.Column(db.String, doc='邮箱') status = db.Column(db.Integer, default=1, doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性 profile =", "doc='职业') class Relation(db.Model): \"\"\" 用户关系表 \"\"\" __tablename__ = 'user_relation' class RELATION: DELETE =", "db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') mobile = db.Column(db.String, doc='手机号') password = db.Column(db.String, doc='密码') name", "db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)') # 查询出 手机号为13912345678的用户关注了哪些用户 用户id # SELECT user_basic.user_id,user_relation.target_user_id FROM # user_basic INNER", "db.Column(db.Integer, default=0, doc='关注的人数') fans_count = db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)') like_count = db.Column(db.Integer, default=0, doc='累计点赞人数')", "# WHERE user_basic.mobile = '13912345678' # User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all() # 查询出 编号为1的用户 被哪些用户关注 用户名 #", "= db.Column(db.Integer, db.ForeignKey('user_basic.user_id'), doc='用户ID') target_user_id = db.Column(db.Integer, doc='目标用户ID') relation = db.Column(db.Integer, doc='关系') ctime", "id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') gender = db.Column(db.Integer, default=0, doc='性别') birthday =", "uselist=False) class UserProfile(db.Model): \"\"\" 用户资料表 \"\"\" __tablename__ = 'user_profile' class GENDER: MALE =", "db.Column(db.String, doc='手持身份证') ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now,", "= False SQLALCHEMY_ECHO = True app.config.from_object(MySQLConfig) # 创建操作数据库的管家 db = SQLAlchemy(app) class User(db.Model):", "default=datetime.now, onupdate=datetime.now, doc='更新时间') register_media_time = db.Column(db.DateTime, doc='注册自媒体时间') area = db.Column(db.String, doc='地区') company =", "SQLALCHEMY_ECHO = True app.config.from_object(MySQLConfig) # 创建操作数据库的管家 db = SQLAlchemy(app) class User(db.Model): \"\"\" 用户基本信息", "doc='主键ID') # user_id = db.Column(db.Integer, doc='用户ID') user_id = db.Column(db.Integer, db.ForeignKey('user_basic.user_id'), doc='用户ID') target_user_id =", "= db.Column(db.DateTime, doc='最后登录时间') is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体') is_verified = db.Column(db.Boolean, default=False, doc='是否实名认证')", "ENABLE = 1 DISABLE = 0 id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') mobile", "doc='性别') birthday = db.Column(db.Date, doc='生日') real_name = db.Column(db.String, doc='真实姓名') id_number = db.Column(db.String, doc='身份证号')", "db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)') like_count = db.Column(db.Integer, default=0, doc='累计点赞人数') read_count = db.Column(db.Integer, default=0, doc='累计阅读人数')", "db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True, doc='用户ID') # id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') gender", "= db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') mobile = db.Column(db.String, doc='手机号') password = db.Column(db.String, doc='密码')", "SQLAlchemy app = 
Flask(__name__) class MySQLConfig(object): SQLALCHEMY_DATABASE_URI = \"mysql://root:mysql@127.0.0.1:3306/toutiao\" SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO", "FOLLOW = 1 BLACKLIST = 2 id = db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID') #", "db.Integer, primary_key=True, doc='主键ID') # user_id = db.Column(db.Integer, doc='用户ID') user_id = db.Column(db.Integer, db.ForeignKey('user_basic.user_id'), doc='用户ID')", "= db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False) class UserProfile(db.Model): \"\"\" 用户资料表 \"\"\" __tablename__ = 'user_profile' class", "doc='用户ID') target_user_id = db.Column(db.Integer, doc='目标用户ID') relation = db.Column(db.Integer, doc='关系') ctime = db.Column('create_time', db.DateTime,", "= db.Column(db.Integer, default=1, doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile', uselist=False) follows = db.relationship('Relation')", "用户资料表 \"\"\" __tablename__ = 'user_profile' class GENDER: MALE = 0 FEMALE = 1", "doc='真实姓名') id_number = db.Column(db.String, doc='身份证号') id_card_front = db.Column(db.String, doc='身份证正面') id_card_back = db.Column(db.String, doc='身份证背面')", "name = db.Column('user_name', db.String, doc='昵称') profile_photo = db.Column(db.String, doc='头像') last_login = db.Column(db.DateTime, doc='最后登录时间')", "0 FEMALE = 1 # 使用外键ForeignKey来明确两张表的关系 id = db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True, doc='用户ID')", "= 'user_profile' class GENDER: MALE = 0 FEMALE = 1 # 使用外键ForeignKey来明确两张表的关系 id", "= db.Column(db.String, doc='认证') article_count = db.Column(db.Integer, default=0, doc='发帖数') following_count = db.Column(db.Integer, default=0, doc='关注的人数')", "class GENDER: MALE = 0 FEMALE = 1 # 使用外键ForeignKey来明确两张表的关系 id = db.Column('user_id',", "= 'user_basic' class STATUS: ENABLE = 1 DISABLE = 0 id = db.Column('user_id',", "db.Column(db.Integer, default=0, doc='累计阅读人数') account = db.Column(db.String, doc='账号') email = db.Column(db.String, doc='邮箱') status =", "doc='累计点赞人数') read_count = db.Column(db.Integer, default=0, doc='累计阅读人数') account = db.Column(db.String, doc='账号') email = db.Column(db.String,", "# id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') gender = db.Column(db.Integer, default=0, doc='性别') birthday", "doc='关系') ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now,", "True app.config.from_object(MySQLConfig) # 创建操作数据库的管家 db = SQLAlchemy(app) class User(db.Model): \"\"\" 用户基本信息 \"\"\" __tablename__", "class User(db.Model): \"\"\" 用户基本信息 \"\"\" __tablename__ = 'user_basic' class STATUS: ENABLE = 1", "# user_basic INNER JOIN user_relation # ON user_basic.user_id=user_relation.target_user_id # WHERE user_basic.user_id=1 # Relation.query.join(Relation.target_user).options(contains_eager(Relation.target_user).load_only(User.name),load_only(Relation.target_user_id)).filter(User.id==1).all()", "被哪些用户关注 用户名 # SELECT user_basic.user_name FROM # user_basic INNER JOIN user_relation # ON", "JOIN user_relation ON user_basic.user_id = user_relation.user_id # WHERE user_basic.mobile = '13912345678' # User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all()", "__tablename__ = 'user_relation' class RELATION: DELETE = 0 FOLLOW = 1 BLACKLIST =", "SQLAlchemy(app) class 
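# A minimal smoke-test sketch, not part of the original file: create the
# tables and seed one follow relation. Assumes the database configured in
# MySQLConfig is reachable; note that the length-less db.String columns
# above only materialize on backends like SQLite, so for a quick local try
# point SQLALCHEMY_DATABASE_URI at 'sqlite://' (the original tables were
# presumably created with external SQL scripts).
if __name__ == '__main__':
    with app.app_context():
        db.create_all()  # creates user_basic, user_profile, user_relation
        user = User(mobile='13912345678', name='demo')
        db.session.add(user)
        db.session.commit()
        rel = Relation(user_id=user.id, target_user_id=1,
                       relation=Relation.RELATION.FOLLOW)
        db.session.add(rel)
        db.session.commit()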
# Query: which users does the user with mobile 13912345678 follow? (user ids)
# SELECT user_basic.user_id, user_relation.target_user_id FROM
#     user_basic INNER JOIN user_relation ON user_basic.user_id = user_relation.user_id
#     WHERE user_basic.mobile = '13912345678'
# User.query.join(User.follows).options(load_only(User.id), contains_eager(User.follows).load_only(Relation.target_user_id)).filter(User.mobile == '13912345678', Relation.relation == 1).all()

# Query: which users follow the user with id 1? (user names)
# SELECT user_basic.user_name FROM
#     user_basic INNER JOIN user_relation ON user_basic.user_id = user_relation.target_user_id
#     WHERE user_basic.user_id = 1
# Relation.query.join(Relation.target_user).options(contains_eager(Relation.target_user).load_only(User.name), load_only(Relation.target_user_id)).filter(User.id == 1).all()
FROM #", "= SQLAlchemy(app) class User(db.Model): \"\"\" 用户基本信息 \"\"\" __tablename__ = 'user_basic' class STATUS: ENABLE", "# user_id = db.Column(db.Integer, doc='用户ID') user_id = db.Column(db.Integer, db.ForeignKey('user_basic.user_id'), doc='用户ID') target_user_id = db.Column(db.Integer,", "real_name = db.Column(db.String, doc='真实姓名') id_number = db.Column(db.String, doc='身份证号') id_card_front = db.Column(db.String, doc='身份证正面') id_card_back", "doc='用户ID') mobile = db.Column(db.String, doc='手机号') password = db.Column(db.String, doc='密码') name = db.Column('user_name', db.String,", "class STATUS: ENABLE = 1 DISABLE = 0 id = db.Column('user_id', db.Integer, primary_key=True,", "doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile', uselist=False) follows = db.relationship('Relation') # 使用primaryjoin来明确两张表的属性,使用补充的relationship字段明确触发的属性 #", "= db.Column(db.Integer, doc='目标用户ID') relation = db.Column(db.Integer, doc='关系') ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间')", "status = db.Column(db.Integer, default=1, doc='状态,是否可用') # 使用补充的relationship字段明确触发的属性 profile = db.relationship('UserProfile', uselist=False) follows =", "db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') register_media_time = db.Column(db.DateTime, doc='注册自媒体时间') area = db.Column(db.String, doc='地区') company", "db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间') utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间') register_media_time =", "id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID') mobile = db.Column(db.String, doc='手机号') password = db.Column(db.String,", "doc='更新时间') target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)') # 查询出 手机号为13912345678的用户关注了哪些用户 用户id # SELECT user_basic.user_id,user_relation.target_user_id FROM", "# SELECT user_basic.user_name FROM # user_basic INNER JOIN user_relation # ON user_basic.user_id=user_relation.target_user_id #" ]
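The first commented query already shows its ORM form; the second stops at the SQL. As a minimal sketch (my addition, not from the original source), the follower lookup can be driven through the target_user relationship declared above, assuming load_only and contains_eager are imported from sqlalchemy.orm as in the first query:

# Sketch only: list who follows user 1, loading just the follower ids
# and the followed user's name via the target_user relationship.
from sqlalchemy.orm import load_only, contains_eager

followers = (
    Relation.query.join(Relation.target_user)
    .options(
        load_only(Relation.user_id),
        contains_eager(Relation.target_user).load_only(User.name),
    )
    .filter(Relation.target_user_id == 1,
            Relation.relation == Relation.RELATION.FOLLOW)
    .all()
)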
[ "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "under the License. \"\"\" import app_server_conf \"\"\"Default configuration settings for NetHead UI\"\"\" #", "\"\"\" import app_server_conf \"\"\"Default configuration settings for NetHead UI\"\"\" # Pathname for log", "the specific language governing permissions and limitations under the License. \"\"\" import app_server_conf", "CONDITIONS OF ANY KIND, either express or implied. See the License for the", "app_server_conf \"\"\"Default configuration settings for NetHead UI\"\"\" # Pathname for log files. LOGGING_PATHNAME", "OR CONDITIONS OF ANY KIND, either express or implied. See the License for", "OF ANY KIND, either express or implied. See the License for the specific", "to in writing, software distributed under the License is distributed on an \"AS", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "not use this file except in compliance with the License. You may obtain", "License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "except in compliance with the License. You may obtain a copy of the", "may not use this file except in compliance with the License. You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "License. \"\"\" import app_server_conf \"\"\"Default configuration settings for NetHead UI\"\"\" # Pathname for", "configuration settings for NetHead UI\"\"\" # Pathname for log files. LOGGING_PATHNAME = 'nethead.log'", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "the License for the specific language governing permissions and limitations under the License.", "ANY KIND, either express or implied. See the License for the specific language", "2020 <NAME> Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "file except in compliance with the License. You may obtain a copy of", "License for the specific language governing permissions and limitations under the License. \"\"\"", "Unless required by applicable law or agreed to in writing, software distributed under", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing,", "2.0 (the \"License\"); you may not use this file except in compliance with", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "See the License for the specific language governing permissions and limitations under the", "permissions and limitations under the License. \"\"\" import app_server_conf \"\"\"Default configuration settings for", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", "governing permissions and limitations under the License. \"\"\" import app_server_conf \"\"\"Default configuration settings", "the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "License, Version 2.0 (the \"License\"); you may not use this file except in", "compliance with the License. 
You may obtain a copy of the License at", "and limitations under the License. \"\"\" import app_server_conf \"\"\"Default configuration settings for NetHead", "(the \"License\"); you may not use this file except in compliance with the", "this file except in compliance with the License. You may obtain a copy", "\"License\"); you may not use this file except in compliance with the License.", "express or implied. See the License for the specific language governing permissions and", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "you may not use this file except in compliance with the License. You", "for the specific language governing permissions and limitations under the License. \"\"\" import", "agreed to in writing, software distributed under the License is distributed on an", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "Copyright 2020 <NAME> Licensed under the Apache License, Version 2.0 (the \"License\"); you", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "by applicable law or agreed to in writing, software distributed under the License", "applicable law or agreed to in writing, software distributed under the License is", "implied. See the License for the specific language governing permissions and limitations under", "\"\"\" Copyright 2020 <NAME> Licensed under the Apache License, Version 2.0 (the \"License\");", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License", "import app_server_conf \"\"\"Default configuration settings for NetHead UI\"\"\" # Pathname for log files.", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "<NAME> Licensed under the Apache License, Version 2.0 (the \"License\"); you may not", "language governing permissions and limitations under the License. \"\"\" import app_server_conf \"\"\"Default configuration", "law or agreed to in writing, software distributed under the License is distributed", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "the License. \"\"\" import app_server_conf \"\"\"Default configuration settings for NetHead UI\"\"\" # Pathname", "Version 2.0 (the \"License\"); you may not use this file except in compliance", "in compliance with the License. You may obtain a copy of the License", "the Apache License, Version 2.0 (the \"License\"); you may not use this file", "use this file except in compliance with the License. You may obtain a", "KIND, either express or implied. See the License for the specific language governing", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use", "in writing, software distributed under the License is distributed on an \"AS IS\"", "under the Apache License, Version 2.0 (the \"License\"); you may not use this", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "limitations under the License. 
\"\"\" import app_server_conf \"\"\"Default configuration settings for NetHead UI\"\"\"", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "either express or implied. See the License for the specific language governing permissions", "or agreed to in writing, software distributed under the License is distributed on", "specific language governing permissions and limitations under the License. \"\"\" import app_server_conf \"\"\"Default", "\"\"\"Default configuration settings for NetHead UI\"\"\" # Pathname for log files. LOGGING_PATHNAME =", "Apache License, Version 2.0 (the \"License\"); you may not use this file except", "or implied. See the License for the specific language governing permissions and limitations", "with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "required by applicable law or agreed to in writing, software distributed under the", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software" ]
[ "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('app', '0014_alter_list_users'), ] operations =", "2022-01-22 15:53 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('app', '0014_alter_list_users'),", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('app', '0014_alter_list_users'), ] operations", "class Migration(migrations.Migration): dependencies = [ ('app', '0014_alter_list_users'), ] operations = [ migrations.RenameField( model_name='list',", "= [ ('app', '0014_alter_list_users'), ] operations = [ migrations.RenameField( model_name='list', old_name='users', new_name='user', ),", "4.0.1 on 2022-01-22 15:53 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "import migrations class Migration(migrations.Migration): dependencies = [ ('app', '0014_alter_list_users'), ] operations = [", "migrations class Migration(migrations.Migration): dependencies = [ ('app', '0014_alter_list_users'), ] operations = [ migrations.RenameField(", "by Django 4.0.1 on 2022-01-22 15:53 from django.db import migrations class Migration(migrations.Migration): dependencies", "Generated by Django 4.0.1 on 2022-01-22 15:53 from django.db import migrations class Migration(migrations.Migration):", "dependencies = [ ('app', '0014_alter_list_users'), ] operations = [ migrations.RenameField( model_name='list', old_name='users', new_name='user',", "Migration(migrations.Migration): dependencies = [ ('app', '0014_alter_list_users'), ] operations = [ migrations.RenameField( model_name='list', old_name='users',", "# Generated by Django 4.0.1 on 2022-01-22 15:53 from django.db import migrations class", "15:53 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('app', '0014_alter_list_users'), ]", "Django 4.0.1 on 2022-01-22 15:53 from django.db import migrations class Migration(migrations.Migration): dependencies =", "on 2022-01-22 15:53 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('app',", "[ ('app', '0014_alter_list_users'), ] operations = [ migrations.RenameField( model_name='list', old_name='users', new_name='user', ), ]" ]
[ "print(name.rstrip() + dots) #rstrip Method print(name.strip() + dots) #strip Method print(name.replace(\" \", \"\")", "\" Pervaiz \" dots = \" .........\" print(name.lstrip() + dots) #lstrip Method print(name.rstrip()", "name = \" Pervaiz \" dots = \" .........\" print(name.lstrip() + dots) #lstrip", "\" .........\" print(name.lstrip() + dots) #lstrip Method print(name.rstrip() + dots) #rstrip Method print(name.strip()", "\" dots = \" .........\" print(name.lstrip() + dots) #lstrip Method print(name.rstrip() + dots)", "<filename>Strip_Method.py name = \" Pervaiz \" dots = \" .........\" print(name.lstrip() + dots)", "dots = \" .........\" print(name.lstrip() + dots) #lstrip Method print(name.rstrip() + dots) #rstrip", "Pervaiz \" dots = \" .........\" print(name.lstrip() + dots) #lstrip Method print(name.rstrip() +", "print(name.lstrip() + dots) #lstrip Method print(name.rstrip() + dots) #rstrip Method print(name.strip() + dots)", "#rstrip Method print(name.strip() + dots) #strip Method print(name.replace(\" \", \"\") + dots) #Replace", "= \" .........\" print(name.lstrip() + dots) #lstrip Method print(name.rstrip() + dots) #rstrip Method", "Method print(name.rstrip() + dots) #rstrip Method print(name.strip() + dots) #strip Method print(name.replace(\" \",", "= \" Pervaiz \" dots = \" .........\" print(name.lstrip() + dots) #lstrip Method", ".........\" print(name.lstrip() + dots) #lstrip Method print(name.rstrip() + dots) #rstrip Method print(name.strip() +", "Method print(name.strip() + dots) #strip Method print(name.replace(\" \", \"\") + dots) #Replace Method", "+ dots) #rstrip Method print(name.strip() + dots) #strip Method print(name.replace(\" \", \"\") +", "dots) #rstrip Method print(name.strip() + dots) #strip Method print(name.replace(\" \", \"\") + dots)", "+ dots) #lstrip Method print(name.rstrip() + dots) #rstrip Method print(name.strip() + dots) #strip", "dots) #lstrip Method print(name.rstrip() + dots) #rstrip Method print(name.strip() + dots) #strip Method", "#lstrip Method print(name.rstrip() + dots) #rstrip Method print(name.strip() + dots) #strip Method print(name.replace(\"" ]
[ "self._allSensors = data self._lastUpdatedTime = time.time() def setUseByInterval(self, seconds): if seconds <= 0:", "57 BATTERY_CURRENT = 58 GYRO_ACCUM = 60 class BUFFER_SIZES(Enum): ULTRASONIC = 2 ACCL", "None self._lastUpdatedTime = 0 self._useByInterval = ( 0.25 # cached values older than", "self._lastUpdatedTime = 0 self._useByInterval = ( 0.25 # cached values older than 0.25", "= { OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value),", "- self._lastUpdatedTime > self._useByInterval ): return None if isinstance(opType, Enum): opType = opType.value", "= 58 GYRO_ACCUM = 60 class BUFFER_SIZES(Enum): ULTRASONIC = 2 ACCL = 6", "GYRO_ACCUM = 60 class BUFFER_SIZES(Enum): ULTRASONIC = 2 ACCL = 6 GYRO =", "class BUFFER_POSITIONS(Enum): ULTRASONIC = 0 ACCL = 2 GYRO = 8 COLOUR_ALL =", "in self._startAndSizeIndexForOpType: indices = self._startAndSizeIndexForOpType[opType] return self._allSensors[indices[0] : indices[0] + indices[1]] return None", "6 GYRO = 12 COLOUR_ALL = 30 TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE = 2", "a positive non-zero number\" ) self._useByInterval = seconds def invalidateCache(self): self._allSensors = None", "= 6 GYRO = 12 COLOUR_ALL = 30 TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE =", "from ._comms_constants import MicromelonType as OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC = 0 ACCL =", "= time.time() def setUseByInterval(self, seconds): if seconds <= 0: raise Exception( \"Use by", "def setUseByInterval(self, seconds): if seconds <= 0: raise Exception( \"Use by interval for", "60 class BUFFER_SIZES(Enum): ULTRASONIC = 2 ACCL = 6 GYRO = 12 COLOUR_ALL", "( 0.25 # cached values older than 0.25 seconds will be ignored )", "seconds <= 0: raise Exception( \"Use by interval for RoverReadCache must be a", "<reponame>timmyhadwen/mm-pymodule from enum import Enum import time from ._comms_constants import MicromelonType as OPTYPE", "PERCENTAGE_PADDING = 57 BATTERY_CURRENT = 58 GYRO_ACCUM = 60 class BUFFER_SIZES(Enum): ULTRASONIC =", "positive non-zero number\" ) self._useByInterval = seconds def invalidateCache(self): self._allSensors = None def", "non-zero number\" ) self._useByInterval = seconds def invalidateCache(self): self._allSensors = None def readCache(self,", "= 1 BATTERY_CURRENT = 2 GYRO_ACCUM = 12 class RoverReadCache: def __init__(self) ->", "None: self._allSensors = None self._lastUpdatedTime = 0 self._useByInterval = ( 0.25 # cached", "BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ), }", "BUFFER_SIZES.GYRO_ACCUM.value, ), } def updateAllSensors(self, data): self._allSensors = data self._lastUpdatedTime = time.time() def", "be a positive non-zero number\" ) self._useByInterval = seconds def invalidateCache(self): self._allSensors =", "BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING = 1 BATTERY_CURRENT = 2 GYRO_ACCUM", "BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING = 57 BATTERY_CURRENT = 58 GYRO_ACCUM = 60 class", "isinstance(opType, Enum): opType = opType.value if opType in self._startAndSizeIndexForOpType: indices = self._startAndSizeIndexForOpType[opType] return", "2 GYRO = 8 COLOUR_ALL = 20 TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE = 54", "= data self._lastUpdatedTime = 
time.time() def setUseByInterval(self, seconds): if seconds <= 0: raise", "), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ), } def updateAllSensors(self, data): self._allSensors = data", "time.time() - self._lastUpdatedTime > self._useByInterval ): return None if isinstance(opType, Enum): opType =", "= 2 ACCL = 6 GYRO = 12 COLOUR_ALL = 30 TIME_OF_FLIGHT =", "BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: (", "= 54 BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING = 57 BATTERY_CURRENT = 58 GYRO_ACCUM =", "opType = opType.value if opType in self._startAndSizeIndexForOpType: indices = self._startAndSizeIndexForOpType[opType] return self._allSensors[indices[0] :", "seconds def invalidateCache(self): self._allSensors = None def readCache(self, opType): if ( not self._allSensors", "BATTERY_CURRENT = 2 GYRO_ACCUM = 12 class RoverReadCache: def __init__(self) -> None: self._allSensors", "0.25 seconds will be ignored ) self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value,", "import time from ._comms_constants import MicromelonType as OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC = 0", "OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value,", "None def readCache(self, opType): if ( not self._allSensors or time.time() - self._lastUpdatedTime >", "12 class RoverReadCache: def __init__(self) -> None: self._allSensors = None self._lastUpdatedTime = 0", "self._useByInterval = ( 0.25 # cached values older than 0.25 seconds will be", "( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value,", "if opType in self._startAndSizeIndexForOpType: indices = self._startAndSizeIndexForOpType[opType] return self._allSensors[indices[0] : indices[0] + indices[1]]", "return None if isinstance(opType, Enum): opType = opType.value if opType in self._startAndSizeIndexForOpType: indices", "= 12 COLOUR_ALL = 30 TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE =", "= None def readCache(self, opType): if ( not self._allSensors or time.time() - self._lastUpdatedTime", ") self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value:", "20 TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING = 57", "raise Exception( \"Use by interval for RoverReadCache must be a positive non-zero number\"", "self._useByInterval = seconds def invalidateCache(self): self._allSensors = None def readCache(self, opType): if (", "class BUFFER_SIZES(Enum): ULTRASONIC = 2 ACCL = 6 GYRO = 12 COLOUR_ALL =", "( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, 
BUFFER_SIZES.GYRO_ACCUM.value, ), } def updateAllSensors(self, data):", "self._lastUpdatedTime = time.time() def setUseByInterval(self, seconds): if seconds <= 0: raise Exception( \"Use", "4 BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING = 1 BATTERY_CURRENT = 2", "54 BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING = 57 BATTERY_CURRENT = 58 GYRO_ACCUM = 60", "= seconds def invalidateCache(self): self._allSensors = None def readCache(self, opType): if ( not", "._comms_constants import MicromelonType as OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC = 0 ACCL = 2", "opType.value if opType in self._startAndSizeIndexForOpType: indices = self._startAndSizeIndexForOpType[opType] return self._allSensors[indices[0] : indices[0] +", "BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ), } def updateAllSensors(self, data): self._allSensors", "time.time() def setUseByInterval(self, seconds): if seconds <= 0: raise Exception( \"Use by interval", "as OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC = 0 ACCL = 2 GYRO = 8", "seconds will be ignored ) self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ),", "BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value,", "= 57 BATTERY_CURRENT = 58 GYRO_ACCUM = 60 class BUFFER_SIZES(Enum): ULTRASONIC = 2", "OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: (", "import Enum import time from ._comms_constants import MicromelonType as OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC", "OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value,", "OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value,", "if isinstance(opType, Enum): opType = opType.value if opType in self._startAndSizeIndexForOpType: indices = self._startAndSizeIndexForOpType[opType]", "be ignored ) self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value,", "= None self._lastUpdatedTime = 0 self._useByInterval = ( 0.25 # cached values older", "MicromelonType as OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC = 0 ACCL = 2 GYRO =", "self._useByInterval ): return None if isinstance(opType, Enum): opType = opType.value if opType in", "data self._lastUpdatedTime = time.time() def setUseByInterval(self, seconds): if seconds <= 0: raise Exception(", "), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, 
BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: (", "( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value,", "ACCL = 6 GYRO = 12 COLOUR_ALL = 30 TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE", "def updateAllSensors(self, data): self._allSensors = data self._lastUpdatedTime = time.time() def setUseByInterval(self, seconds): if", "updateAllSensors(self, data): self._allSensors = data self._lastUpdatedTime = time.time() def setUseByInterval(self, seconds): if seconds", "<= 0: raise Exception( \"Use by interval for RoverReadCache must be a positive", "BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING = 1 BATTERY_CURRENT = 2 GYRO_ACCUM = 12 class", "PERCENTAGE_PADDING = 1 BATTERY_CURRENT = 2 GYRO_ACCUM = 12 class RoverReadCache: def __init__(self)", "BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ),", "( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value,", "(BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value,", "BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value:", "50 BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING = 57 BATTERY_CURRENT = 58", "= 20 TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING =", "BUFFER_POSITIONS(Enum): ULTRASONIC = 0 ACCL = 2 GYRO = 8 COLOUR_ALL = 20", "( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ), } def updateAllSensors(self, data): self._allSensors = data self._lastUpdatedTime =", "BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ), } def updateAllSensors(self, data): self._allSensors =", "56 PERCENTAGE_PADDING = 57 BATTERY_CURRENT = 58 GYRO_ACCUM = 60 class BUFFER_SIZES(Enum): ULTRASONIC", "-> None: self._allSensors = None self._lastUpdatedTime = 0 self._useByInterval = ( 0.25 #", "opType): if ( not self._allSensors or time.time() - self._lastUpdatedTime > self._useByInterval ): return", "if ( not self._allSensors or time.time() - self._lastUpdatedTime > self._useByInterval ): return None", "8 COLOUR_ALL = 20 TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE = 56", "( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: 
(BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value,", "def __init__(self) -> None: self._allSensors = None self._lastUpdatedTime = 0 self._useByInterval = (", "number\" ) self._useByInterval = seconds def invalidateCache(self): self._allSensors = None def readCache(self, opType):", "12 COLOUR_ALL = 30 TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE = 1", "> self._useByInterval ): return None if isinstance(opType, Enum): opType = opType.value if opType", "BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value:", "seconds): if seconds <= 0: raise Exception( \"Use by interval for RoverReadCache must", "will be ignored ) self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value:", "1 BATTERY_CURRENT = 2 GYRO_ACCUM = 12 class RoverReadCache: def __init__(self) -> None:", ") self._useByInterval = seconds def invalidateCache(self): self._allSensors = None def readCache(self, opType): if", "( not self._allSensors or time.time() - self._lastUpdatedTime > self._useByInterval ): return None if", "ULTRASONIC = 0 ACCL = 2 GYRO = 8 COLOUR_ALL = 20 TIME_OF_FLIGHT", "= ( 0.25 # cached values older than 0.25 seconds will be ignored", "(BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value:", "= 30 TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING =", "time from ._comms_constants import MicromelonType as OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC = 0 ACCL", "2 GYRO_ACCUM = 12 class RoverReadCache: def __init__(self) -> None: self._allSensors = None", "BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value:", "OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ), } def updateAllSensors(self, data): self._allSensors = data self._lastUpdatedTime", "interval for RoverReadCache must be a positive non-zero number\" ) self._useByInterval = seconds", "readCache(self, opType): if ( not self._allSensors or time.time() - self._lastUpdatedTime > self._useByInterval ):", "opType in self._startAndSizeIndexForOpType: indices = self._startAndSizeIndexForOpType[opType] return self._allSensors[indices[0] : indices[0] + indices[1]] return", "or time.time() - self._lastUpdatedTime > self._useByInterval ): return None if isinstance(opType, Enum): opType", "COLOUR_ALL = 30 TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING", "enum import Enum import time from ._comms_constants import MicromelonType as OPTYPE class BUFFER_POSITIONS(Enum):", "0: raise Exception( \"Use by interval for RoverReadCache must be a positive non-zero", "BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( 
BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value,", "BUFFER_SIZES(Enum): ULTRASONIC = 2 ACCL = 6 GYRO = 12 COLOUR_ALL = 30", "OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value,", "GYRO_ACCUM = 12 class RoverReadCache: def __init__(self) -> None: self._allSensors = None self._lastUpdatedTime", "self._allSensors or time.time() - self._lastUpdatedTime > self._useByInterval ): return None if isinstance(opType, Enum):", "= 56 PERCENTAGE_PADDING = 57 BATTERY_CURRENT = 58 GYRO_ACCUM = 60 class BUFFER_SIZES(Enum):", "30 TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING = 1", "= 8 COLOUR_ALL = 20 TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE =", "= 60 class BUFFER_SIZES(Enum): ULTRASONIC = 2 ACCL = 6 GYRO = 12", "Enum import time from ._comms_constants import MicromelonType as OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC =", "from enum import Enum import time from ._comms_constants import MicromelonType as OPTYPE class", "), OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: (", "BATTERY_CURRENT = 58 GYRO_ACCUM = 60 class BUFFER_SIZES(Enum): ULTRASONIC = 2 ACCL =", "self._allSensors = None self._lastUpdatedTime = 0 self._useByInterval = ( 0.25 # cached values", "0 self._useByInterval = ( 0.25 # cached values older than 0.25 seconds will", "), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value:", "TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING = 57 BATTERY_CURRENT", "= 0 self._useByInterval = ( 0.25 # cached values older than 0.25 seconds", "BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ), } def updateAllSensors(self, data): self._allSensors = data self._lastUpdatedTime = time.time()", "setUseByInterval(self, seconds): if seconds <= 0: raise Exception( \"Use by interval for RoverReadCache", "OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC = 0 ACCL = 2 GYRO = 8 COLOUR_ALL", "): return None if isinstance(opType, Enum): opType = opType.value if opType in self._startAndSizeIndexForOpType:", "( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value,", "OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ), } def updateAllSensors(self,", "OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: ( BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: ( 
BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ),", "0.25 # cached values older than 0.25 seconds will be ignored ) self._startAndSizeIndexForOpType", "ACCL = 2 GYRO = 8 COLOUR_ALL = 20 TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE", "def readCache(self, opType): if ( not self._allSensors or time.time() - self._lastUpdatedTime > self._useByInterval", "{ OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value:", "RoverReadCache: def __init__(self) -> None: self._allSensors = None self._lastUpdatedTime = 0 self._useByInterval =", "= 0 ACCL = 2 GYRO = 8 COLOUR_ALL = 20 TIME_OF_FLIGHT =", "TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING = 1 BATTERY_CURRENT", "= 1 PERCENTAGE_PADDING = 1 BATTERY_CURRENT = 2 GYRO_ACCUM = 12 class RoverReadCache:", "RoverReadCache must be a positive non-zero number\" ) self._useByInterval = seconds def invalidateCache(self):", "BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ),", "ULTRASONIC = 2 ACCL = 6 GYRO = 12 COLOUR_ALL = 30 TIME_OF_FLIGHT", "# cached values older than 0.25 seconds will be ignored ) self._startAndSizeIndexForOpType =", "), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ), } def", "= 2 GYRO = 8 COLOUR_ALL = 20 TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE =", "GYRO = 12 COLOUR_ALL = 30 TIME_OF_FLIGHT = 4 BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE", "cached values older than 0.25 seconds will be ignored ) self._startAndSizeIndexForOpType = {", "for RoverReadCache must be a positive non-zero number\" ) self._useByInterval = seconds def", "= 12 class RoverReadCache: def __init__(self) -> None: self._allSensors = None self._lastUpdatedTime =", "older than 0.25 seconds will be ignored ) self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value: (", "OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value, BUFFER_SIZES.GYRO.value), OPTYPE.COLOUR_ALL.value: (", "), } def updateAllSensors(self, data): self._allSensors = data self._lastUpdatedTime = time.time() def setUseByInterval(self,", "def invalidateCache(self): self._allSensors = None def readCache(self, opType): if ( not self._allSensors or", "not self._allSensors or time.time() - self._lastUpdatedTime > self._useByInterval ): return None if isinstance(opType,", "self._lastUpdatedTime > self._useByInterval ): return None if isinstance(opType, Enum): opType = opType.value if", "= 2 BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING = 1 BATTERY_CURRENT = 2 GYRO_ACCUM =", "), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: (", "values older than 0.25 seconds will be ignored ) 
self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value:", "import MicromelonType as OPTYPE class BUFFER_POSITIONS(Enum): ULTRASONIC = 0 ACCL = 2 GYRO", "BUFFER_POSITIONS.TIME_OF_FLIGHT.value, BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ), OPTYPE.STATE_OF_CHARGE.value: ( BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ),", "1 PERCENTAGE_PADDING = 1 BATTERY_CURRENT = 2 GYRO_ACCUM = 12 class RoverReadCache: def", "BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING = 57 BATTERY_CURRENT = 58 GYRO_ACCUM", "must be a positive non-zero number\" ) self._useByInterval = seconds def invalidateCache(self): self._allSensors", "= 50 BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING = 57 BATTERY_CURRENT =", "2 BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING = 1 BATTERY_CURRENT = 2 GYRO_ACCUM = 12", "= 4 BATTERY_VOLTAGE = 2 BATTERY_PERCENTAGE = 1 PERCENTAGE_PADDING = 1 BATTERY_CURRENT =", "by interval for RoverReadCache must be a positive non-zero number\" ) self._useByInterval =", "Exception( \"Use by interval for RoverReadCache must be a positive non-zero number\" )", "2 ACCL = 6 GYRO = 12 COLOUR_ALL = 30 TIME_OF_FLIGHT = 4", "data): self._allSensors = data self._lastUpdatedTime = time.time() def setUseByInterval(self, seconds): if seconds <=", "} def updateAllSensors(self, data): self._allSensors = data self._lastUpdatedTime = time.time() def setUseByInterval(self, seconds):", "COLOUR_ALL = 20 TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE = 56 PERCENTAGE_PADDING", "class RoverReadCache: def __init__(self) -> None: self._allSensors = None self._lastUpdatedTime = 0 self._useByInterval", "0 ACCL = 2 GYRO = 8 COLOUR_ALL = 20 TIME_OF_FLIGHT = 50", "BUFFER_POSITIONS.BATTERY_PERCENTAGE.value, BUFFER_SIZES.BATTERY_PERCENTAGE.value, ), OPTYPE.CURRENT_SENSOR.value: ( BUFFER_POSITIONS.BATTERY_CURRENT.value, BUFFER_SIZES.BATTERY_CURRENT.value, ), OPTYPE.GYRO_ACCUM.value: ( BUFFER_POSITIONS.GYRO_ACCUM.value, BUFFER_SIZES.GYRO_ACCUM.value, ),", "self._allSensors = None def readCache(self, opType): if ( not self._allSensors or time.time() -", "= opType.value if opType in self._startAndSizeIndexForOpType: indices = self._startAndSizeIndexForOpType[opType] return self._allSensors[indices[0] : indices[0]", "invalidateCache(self): self._allSensors = None def readCache(self, opType): if ( not self._allSensors or time.time()", "GYRO = 8 COLOUR_ALL = 20 TIME_OF_FLIGHT = 50 BATTERY_VOLTAGE = 54 BATTERY_PERCENTAGE", "\"Use by interval for RoverReadCache must be a positive non-zero number\" ) self._useByInterval", "Enum): opType = opType.value if opType in self._startAndSizeIndexForOpType: indices = self._startAndSizeIndexForOpType[opType] return self._allSensors[indices[0]", "None if isinstance(opType, Enum): opType = opType.value if opType in self._startAndSizeIndexForOpType: indices =", "58 GYRO_ACCUM = 60 class BUFFER_SIZES(Enum): ULTRASONIC = 2 ACCL = 6 GYRO", "__init__(self) -> None: self._allSensors = None self._lastUpdatedTime = 0 self._useByInterval = ( 0.25", "ignored ) self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value),", "BUFFER_POSITIONS.COLOUR_ALL.value, BUFFER_SIZES.COLOUR_ALL.value, ), OPTYPE.TIME_OF_FLIGHT.value: ( BUFFER_POSITIONS.TIME_OF_FLIGHT.value, 
BUFFER_SIZES.TIME_OF_FLIGHT.value, ), OPTYPE.BATTERY_VOLTAGE.value: ( BUFFER_POSITIONS.BATTERY_VOLTAGE.value, BUFFER_SIZES.BATTERY_VOLTAGE.value, ),", "self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value, BUFFER_SIZES.ULTRASONIC.value, ), OPTYPE.ACCL.value: (BUFFER_POSITIONS.ACCL.value, BUFFER_SIZES.ACCL.value), OPTYPE.GYRO.value: (BUFFER_POSITIONS.GYRO.value,", "= 2 GYRO_ACCUM = 12 class RoverReadCache: def __init__(self) -> None: self._allSensors =", "than 0.25 seconds will be ignored ) self._startAndSizeIndexForOpType = { OPTYPE.ULTRASONIC.value: ( BUFFER_POSITIONS.ULTRASONIC.value,", "if seconds <= 0: raise Exception( \"Use by interval for RoverReadCache must be" ]
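A quick usage sketch (my addition; the OPTYPE member names come from the mapping above, but the buffer contents are made up):

# Sketch: cache one 72-byte sensor snapshot, then read the ultrasonic slice.
cache = RoverReadCache()
cache.updateAllSensors(list(range(72)))       # fake full-sensor buffer
print(cache.readCache(OPTYPE.ULTRASONIC))     # -> [0, 1] (start 0, size 2)
cache.invalidateCache()
print(cache.readCache(OPTYPE.ULTRASONIC))     # -> None once invalidated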
[ "the y-axis. INSTRUCTIONS 70XP First, use plt.hist() to plot the histogram of the", "the histogram of the 1-D array pixels in the bottom subplot. Use the", "pre-flattened into the 1D array pixels for you. The histogram option cumulative=True permits", "BY 2.0). This time, the 2D array image will be pre-loaded and pre-flattened", "Use plt.twinx() to overlay the CDF in the bottom subplot plt.twinx() # Display", "the pixels cdf = plt.hist(pixels, bins=64, range=(0,256), cumulative=True, normed=True, color='blue', alpha=0.4) # Specify", "x-axis range, hide axes, add title and display plot plt.xlim((0,256)) plt.grid('off') plt.title('PDF &", "plots to be overlayed sharing the x-axis but with different scales on the", "cmap='gray') plt.title('Original image') plt.axis('off') # Flatten the image into 1 dimension: pixels pixels", "Use the histogram options bins=64, range=(0,256), and normed=True. This time, also use cumulative=True", "see. ''' # Load the image into an array: image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')", "axis. Third, call plt.hist() again to overlay the CDF in the bottom subplot.", "cumulative histogram of the pixels cdf = plt.hist(pixels, bins=64, range=(0,256), cumulative=True, normed=True, color='blue',", "grayscale image. You will use the grayscale image of Hawkes Bay, New Zealand", "plotting options alpha=0.4 and color='red' to make the overlayed plots easier to see.", "color='red', alpha=0.4) plt.grid('off') # Use plt.twinx() to overlay the CDF in the bottom", "First, use plt.hist() to plot the histogram of the 1-D array pixels in", "# Load the image into an array: image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display", "(originally by <NAME>, modified by User:Konstable, via Wikimedia Commons, CC BY 2.0). This", "off distracting grid lines. The command plt.twinx() allows two plots to be overlayed", "and CDF of pixel intensities from a grayscale image. You will use the", "image into an array: image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image in top", "bins=64, range=(0,256), cumulative=True, normed=True, color='blue', alpha=0.4) # Specify x-axis range, hide axes, add", "options bins=64, range=(0,256), and normed=True. This time, also use cumulative=True to compute and", "a continuous random variable is sometimes called a Probability Distribution Function (or PDF).", "by <NAME>, modified by User:Konstable, via Wikimedia Commons, CC BY 2.0). This time,", "allows two plots to be overlayed sharing the x-axis but with different scales", "cdf = plt.hist(pixels, bins=64, range=(0,256), cumulative=True, normed=True, color='blue', alpha=0.4) # Specify x-axis range,", "use plt.twinx() to overlay plots with different vertical scales on a common horizontal", "call plt.hist() again to overlay the CDF in the bottom subplot. Use the", "You will use the grayscale image of Hawkes Bay, New Zealand (originally by", "random variable is sometimes called a Probability Distribution Function (or PDF). The area", "A histogram of a continuous random variable is sometimes called a Probability Distribution", "compute and display the CDF. Also, use alpha=0.4 and color='blue' to make the", "the overlayed plots easier to see. Second, use plt.twinx() to overlay plots with", "switches off distracting grid lines. The command plt.twinx() allows two plots to be", "color='blue' to make the overlayed plots easier to see. ''' # Load the", "in the bottom subplot. Use the histogram options bins=64, range=(0,256), and normed=False. 
Use", "plt.twinx() # Display a cumulative histogram of the pixels cdf = plt.hist(pixels, bins=64,", "the pixels in the bottom subplot plt.subplot(2,1,2) pdf = plt.hist(pixels, bins=64, range=(0,256), normed=False,", "modified by User:Konstable, via Wikimedia Commons, CC BY 2.0). This time, the 2D", "plt.hist() to plot the histogram of the 1-D array pixels in the bottom", "image') plt.axis('off') # Flatten the image into 1 dimension: pixels pixels = image.flatten()", "a histogram of the pixels in the bottom subplot plt.subplot(2,1,2) pdf = plt.hist(pixels,", "plots easier to see. Second, use plt.twinx() to overlay plots with different vertical", "see. Second, use plt.twinx() to overlay plots with different vertical scales on a", "intensities from a grayscale image. You will use the grayscale image of Hawkes", "Function from an image histogram A histogram of a continuous random variable is", "of the pixels in the bottom subplot plt.subplot(2,1,2) pdf = plt.hist(pixels, bins=64, range=(0,256),", "New Zealand (originally by <NAME>, modified by User:Konstable, via Wikimedia Commons, CC BY", "(or PDF). The area under a PDF (a definite integral) is called a", "pdf = plt.hist(pixels, bins=64, range=(0,256), normed=False, color='red', alpha=0.4) plt.grid('off') # Use plt.twinx() to", "of the pixels cdf = plt.hist(pixels, bins=64, range=(0,256), cumulative=True, normed=True, color='blue', alpha=0.4) #", "Wikimedia Commons, CC BY 2.0). This time, the 2D array image will be", "the CDF in the bottom subplot plt.twinx() # Display a cumulative histogram of", "CDF in the bottom subplot plt.twinx() # Display a cumulative histogram of the", "plots easier to see. ''' # Load the image into an array: image", "to overlay plots with different vertical scales on a common horizontal axis. Third,", "to make the overlayed plots easier to see. ''' # Load the image", "lines. The command plt.twinx() allows two plots to be overlayed sharing the x-axis", "Zealand (originally by <NAME>, modified by User:Konstable, via Wikimedia Commons, CC BY 2.0).", "the bottom subplot plt.twinx() # Display a cumulative histogram of the pixels cdf", "histogram A histogram of a continuous random variable is sometimes called a Probability", "color='red' to make the overlayed plots easier to see. Second, use plt.twinx() to", "the histogram options bins=64, range=(0,256), and normed=False. Use the plotting options alpha=0.4 and", "histogram of the pixels in the bottom subplot plt.subplot(2,1,2) pdf = plt.hist(pixels, bins=64,", "Specify x-axis range, hide axes, add title and display plot plt.xlim((0,256)) plt.grid('off') plt.title('PDF", "options alpha=0.4 and color='red' to make the overlayed plots easier to see. Second,", "be pre-loaded and pre-flattened into the 1D array pixels for you. The histogram", "# Specify x-axis range, hide axes, add title and display plot plt.xlim((0,256)) plt.grid('off')", "probability of observing certain pixel intensities. Your task here is to plot the", "a Probability Distribution Function (or PDF). The area under a PDF (a definite", "viewing the CDF instead of the PDF. Notice that plt.grid('off') switches off distracting", "1D array pixels for you. The histogram option cumulative=True permits viewing the CDF", "again to overlay the CDF in the bottom subplot. Use the histogram options", "distracting grid lines. The command plt.twinx() allows two plots to be overlayed sharing", "scales on the y-axis. 
INSTRUCTIONS 70XP First, use plt.hist() to plot the histogram", "two plots to be overlayed sharing the x-axis but with different scales on", "for you. The histogram option cumulative=True permits viewing the CDF instead of the", "pixel intensities. Your task here is to plot the PDF and CDF of", "color map 'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original image') plt.axis('off') # Flatten the image", "plt.axis('off') # Flatten the image into 1 dimension: pixels pixels = image.flatten() #", "to see. ''' # Load the image into an array: image image =", "of Hawkes Bay, New Zealand (originally by <NAME>, modified by User:Konstable, via Wikimedia", "here is to plot the PDF and CDF of pixel intensities from a", "(or CDF). The CDF quantifies the probability of observing certain pixel intensities. Your", "make the overlayed plots easier to see. ''' # Load the image into", "image into 1 dimension: pixels pixels = image.flatten() # Display a histogram of", "1 dimension: pixels pixels = image.flatten() # Display a histogram of the pixels", "CDF of pixel intensities from a grayscale image. You will use the grayscale", "2.0). This time, the 2D array image will be pre-loaded and pre-flattened into", "image. You will use the grayscale image of Hawkes Bay, New Zealand (originally", "array pixels in the bottom subplot. Use the histogram options bins=64, range=(0,256), and", "a cumulative histogram of the pixels cdf = plt.hist(pixels, bins=64, range=(0,256), cumulative=True, normed=True,", "plt.hist(pixels, bins=64, range=(0,256), cumulative=True, normed=True, color='blue', alpha=0.4) # Specify x-axis range, hide axes,", "histogram of a continuous random variable is sometimes called a Probability Distribution Function", "pixel intensities from a grayscale image. You will use the grayscale image of", "histogram options bins=64, range=(0,256), and normed=True. This time, also use cumulative=True to compute", "to plot the PDF and CDF of pixel intensities from a grayscale image.", "image will be pre-loaded and pre-flattened into the 1D array pixels for you.", "Flatten the image into 1 dimension: pixels pixels = image.flatten() # Display a", "User:Konstable, via Wikimedia Commons, CC BY 2.0). This time, the 2D array image", "cumulative=True to compute and display the CDF. Also, use alpha=0.4 and color='blue' to", "The command plt.twinx() allows two plots to be overlayed sharing the x-axis but", "observing certain pixel intensities. Your task here is to plot the PDF and", "a common horizontal axis. Third, call plt.hist() again to overlay the CDF in", "cumulative=True permits viewing the CDF instead of the PDF. Notice that plt.grid('off') switches", "add title and display plot plt.xlim((0,256)) plt.grid('off') plt.title('PDF & CDF (original image)') plt.show()", "and display the CDF. Also, use alpha=0.4 and color='blue' to make the overlayed", "overlay plots with different vertical scales on a common horizontal axis. Third, call", "the grayscale image of Hawkes Bay, New Zealand (originally by <NAME>, modified by", "normed=True, color='blue', alpha=0.4) # Specify x-axis range, hide axes, add title and display", "plot the PDF and CDF of pixel intensities from a grayscale image. You", "an array: image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image in top subplot using", "into the 1D array pixels for you. 
The histogram option cumulative=True permits viewing", "cumulative=True, normed=True, color='blue', alpha=0.4) # Specify x-axis range, hide axes, add title and", "of observing certain pixel intensities. Your task here is to plot the PDF", "x-axis but with different scales on the y-axis. INSTRUCTIONS 70XP First, use plt.hist()", "and normed=False. Use the plotting options alpha=0.4 and color='red' to make the overlayed", "different vertical scales on a common horizontal axis. Third, call plt.hist() again to", "PDF). The area under a PDF (a definite integral) is called a Cumulative", "the PDF. Notice that plt.grid('off') switches off distracting grid lines. The command plt.twinx()", "grayscale image of Hawkes Bay, New Zealand (originally by <NAME>, modified by User:Konstable,", "a PDF (a definite integral) is called a Cumulative Distribution Function (or CDF).", "will be pre-loaded and pre-flattened into the 1D array pixels for you. The", "pixels = image.flatten() # Display a histogram of the pixels in the bottom", "histogram of the pixels cdf = plt.hist(pixels, bins=64, range=(0,256), cumulative=True, normed=True, color='blue', alpha=0.4)", "use alpha=0.4 and color='blue' to make the overlayed plots easier to see. '''", "overlayed sharing the x-axis but with different scales on the y-axis. INSTRUCTIONS 70XP", "instead of the PDF. Notice that plt.grid('off') switches off distracting grid lines. The", "pixels pixels = image.flatten() # Display a histogram of the pixels in the", "'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original image') plt.axis('off') # Flatten the image into 1", "image histogram A histogram of a continuous random variable is sometimes called a", "the bottom subplot. Use the histogram options bins=64, range=(0,256), and normed=False. Use the", "Second, use plt.twinx() to overlay plots with different vertical scales on a common", "image of Hawkes Bay, New Zealand (originally by <NAME>, modified by User:Konstable, via", "of the PDF. Notice that plt.grid('off') switches off distracting grid lines. The command", "PDF. Notice that plt.grid('off') switches off distracting grid lines. The command plt.twinx() allows", "image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image in top subplot using color map 'gray'", "alpha=0.4 and color='blue' to make the overlayed plots easier to see. ''' #", "you. The histogram option cumulative=True permits viewing the CDF instead of the PDF.", "the x-axis but with different scales on the y-axis. INSTRUCTIONS 70XP First, use", "overlayed plots easier to see. Second, use plt.twinx() to overlay plots with different", "This time, also use cumulative=True to compute and display the CDF. Also, use", "and color='blue' to make the overlayed plots easier to see. ''' # Load", "plt.grid('off') switches off distracting grid lines. The command plt.twinx() allows two plots to", "the 2D array image will be pre-loaded and pre-flattened into the 1D array", "on a common horizontal axis. Third, call plt.hist() again to overlay the CDF", "overlay the CDF in the bottom subplot. Use the histogram options bins=64, range=(0,256),", "certain pixel intensities. Your task here is to plot the PDF and CDF", "Probability Distribution Function (or PDF). The area under a PDF (a definite integral)", "The CDF quantifies the probability of observing certain pixel intensities. Your task here", "intensities. Your task here is to plot the PDF and CDF of pixel", "bins=64, range=(0,256), and normed=True. 
This time, also use cumulative=True to compute and display", "= plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image in top subplot using color map 'gray' plt.subplot(2,1,1)", "is to plot the PDF and CDF of pixel intensities from a grayscale", "to be overlayed sharing the x-axis but with different scales on the y-axis.", "subplot. Use the histogram options bins=64, range=(0,256), and normed=True. This time, also use", "the histogram options bins=64, range=(0,256), and normed=True. This time, also use cumulative=True to", "integral) is called a Cumulative Distribution Function (or CDF). The CDF quantifies the", "# Flatten the image into 1 dimension: pixels pixels = image.flatten() # Display", "the PDF and CDF of pixel intensities from a grayscale image. You will", "overlayed plots easier to see. ''' # Load the image into an array:", "subplot plt.twinx() # Display a cumulative histogram of the pixels cdf = plt.hist(pixels,", "plt.twinx() to overlay the CDF in the bottom subplot plt.twinx() # Display a", "with different scales on the y-axis. INSTRUCTIONS 70XP First, use plt.hist() to plot", "bins=64, range=(0,256), and normed=False. Use the plotting options alpha=0.4 and color='red' to make", "array pixels for you. The histogram option cumulative=True permits viewing the CDF instead", "use the grayscale image of Hawkes Bay, New Zealand (originally by <NAME>, modified", "to see. Second, use plt.twinx() to overlay plots with different vertical scales on", "the overlayed plots easier to see. ''' # Load the image into an", "# Display image in top subplot using color map 'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray')", "CDF instead of the PDF. Notice that plt.grid('off') switches off distracting grid lines.", "the CDF instead of the PDF. Notice that plt.grid('off') switches off distracting grid", "Distribution Function (or CDF). The CDF quantifies the probability of observing certain pixel", "command plt.twinx() allows two plots to be overlayed sharing the x-axis but with", "the 1-D array pixels in the bottom subplot. Use the histogram options bins=64,", "Distribution Function from an image histogram A histogram of a continuous random variable", "Cumulative Distribution Function (or CDF). The CDF quantifies the probability of observing certain", "from a grayscale image. You will use the grayscale image of Hawkes Bay,", "horizontal axis. Third, call plt.hist() again to overlay the CDF in the bottom", "overlay the CDF in the bottom subplot plt.twinx() # Display a cumulative histogram", "image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image in top subplot using color map", "options bins=64, range=(0,256), and normed=False. Use the plotting options alpha=0.4 and color='red' to", "called a Probability Distribution Function (or PDF). The area under a PDF (a", "PDF (a definite integral) is called a Cumulative Distribution Function (or CDF). The", "the bottom subplot plt.subplot(2,1,2) pdf = plt.hist(pixels, bins=64, range=(0,256), normed=False, color='red', alpha=0.4) plt.grid('off')", "be overlayed sharing the x-axis but with different scales on the y-axis. INSTRUCTIONS", "''' Cumulative Distribution Function from an image histogram A histogram of a continuous", "subplot using color map 'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original image') plt.axis('off') # Flatten", "but with different scales on the y-axis. 
INSTRUCTIONS 70XP First, use plt.hist() to", "Display a cumulative histogram of the pixels cdf = plt.hist(pixels, bins=64, range=(0,256), cumulative=True,", "use cumulative=True to compute and display the CDF. Also, use alpha=0.4 and color='blue'", "under a PDF (a definite integral) is called a Cumulative Distribution Function (or", "Your task here is to plot the PDF and CDF of pixel intensities", "using color map 'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original image') plt.axis('off') # Flatten the", "The area under a PDF (a definite integral) is called a Cumulative Distribution", "the image into an array: image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image in", "also use cumulative=True to compute and display the CDF. Also, use alpha=0.4 and", "in top subplot using color map 'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original image') plt.axis('off')", "Display a histogram of the pixels in the bottom subplot plt.subplot(2,1,2) pdf =", "Bay, New Zealand (originally by <NAME>, modified by User:Konstable, via Wikimedia Commons, CC", "range=(0,256), and normed=True. This time, also use cumulative=True to compute and display the", "bottom subplot. Use the histogram options bins=64, range=(0,256), and normed=True. This time, also", "''' # Load the image into an array: image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') #", "pixels in the bottom subplot. Use the histogram options bins=64, range=(0,256), and normed=False.", "alpha=0.4) # Specify x-axis range, hide axes, add title and display plot plt.xlim((0,256))", "of a continuous random variable is sometimes called a Probability Distribution Function (or", "Hawkes Bay, New Zealand (originally by <NAME>, modified by User:Konstable, via Wikimedia Commons,", "hide axes, add title and display plot plt.xlim((0,256)) plt.grid('off') plt.title('PDF & CDF (original", "image.flatten() # Display a histogram of the pixels in the bottom subplot plt.subplot(2,1,2)", "image in top subplot using color map 'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original image')", "the probability of observing certain pixel intensities. Your task here is to plot", "time, also use cumulative=True to compute and display the CDF. Also, use alpha=0.4", "Load the image into an array: image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image", "task here is to plot the PDF and CDF of pixel intensities from", "will use the grayscale image of Hawkes Bay, New Zealand (originally by <NAME>,", "The histogram option cumulative=True permits viewing the CDF instead of the PDF. Notice", "PDF and CDF of pixel intensities from a grayscale image. You will use", "pixels in the bottom subplot plt.subplot(2,1,2) pdf = plt.hist(pixels, bins=64, range=(0,256), normed=False, color='red',", "plots with different vertical scales on a common horizontal axis. Third, call plt.hist()", "to plot the histogram of the 1-D array pixels in the bottom subplot.", "variable is sometimes called a Probability Distribution Function (or PDF). The area under", "in the bottom subplot. Use the histogram options bins=64, range=(0,256), and normed=True. This", "the image into 1 dimension: pixels pixels = image.flatten() # Display a histogram", "1-D array pixels in the bottom subplot. Use the histogram options bins=64, range=(0,256),", "continuous random variable is sometimes called a Probability Distribution Function (or PDF). 
The", "70XP First, use plt.hist() to plot the histogram of the 1-D array pixels", "bins=64, range=(0,256), normed=False, color='red', alpha=0.4) plt.grid('off') # Use plt.twinx() to overlay the CDF", "alpha=0.4) plt.grid('off') # Use plt.twinx() to overlay the CDF in the bottom subplot", "area under a PDF (a definite integral) is called a Cumulative Distribution Function", "plt.twinx() to overlay plots with different vertical scales on a common horizontal axis.", "= plt.hist(pixels, bins=64, range=(0,256), normed=False, color='red', alpha=0.4) plt.grid('off') # Use plt.twinx() to overlay", "by User:Konstable, via Wikimedia Commons, CC BY 2.0). This time, the 2D array", "axes, add title and display plot plt.xlim((0,256)) plt.grid('off') plt.title('PDF & CDF (original image)')", "sometimes called a Probability Distribution Function (or PDF). The area under a PDF", "the plotting options alpha=0.4 and color='red' to make the overlayed plots easier to", "Also, use alpha=0.4 and color='blue' to make the overlayed plots easier to see.", "the CDF. Also, use alpha=0.4 and color='blue' to make the overlayed plots easier", "bottom subplot. Use the histogram options bins=64, range=(0,256), and normed=False. Use the plotting", "definite integral) is called a Cumulative Distribution Function (or CDF). The CDF quantifies", "Function (or PDF). The area under a PDF (a definite integral) is called", "CDF quantifies the probability of observing certain pixel intensities. Your task here is", "and normed=True. This time, also use cumulative=True to compute and display the CDF.", "and color='red' to make the overlayed plots easier to see. Second, use plt.twinx()", "CDF). The CDF quantifies the probability of observing certain pixel intensities. Your task", "quantifies the probability of observing certain pixel intensities. Your task here is to", "different scales on the y-axis. INSTRUCTIONS 70XP First, use plt.hist() to plot the", "subplot. Use the histogram options bins=64, range=(0,256), and normed=False. Use the plotting options", "permits viewing the CDF instead of the PDF. Notice that plt.grid('off') switches off", "plt.hist() again to overlay the CDF in the bottom subplot. Use the histogram", "Display image in top subplot using color map 'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original", "a Cumulative Distribution Function (or CDF). The CDF quantifies the probability of observing", "plt.imshow(image, cmap='gray') plt.title('Original image') plt.axis('off') # Flatten the image into 1 dimension: pixels", "subplot plt.subplot(2,1,2) pdf = plt.hist(pixels, bins=64, range=(0,256), normed=False, color='red', alpha=0.4) plt.grid('off') # Use", "CDF in the bottom subplot. Use the histogram options bins=64, range=(0,256), and normed=True.", "to overlay the CDF in the bottom subplot. Use the histogram options bins=64,", "normed=False. Use the plotting options alpha=0.4 and color='red' to make the overlayed plots", "of pixel intensities from a grayscale image. You will use the grayscale image", "CC BY 2.0). This time, the 2D array image will be pre-loaded and", "histogram of the 1-D array pixels in the bottom subplot. Use the histogram", "histogram options bins=64, range=(0,256), and normed=False. Use the plotting options alpha=0.4 and color='red'", "to overlay the CDF in the bottom subplot plt.twinx() # Display a cumulative", "range=(0,256), cumulative=True, normed=True, color='blue', alpha=0.4) # Specify x-axis range, hide axes, add title", "to make the overlayed plots easier to see. 
Second, use plt.twinx() to overlay", "Distribution Function (or PDF). The area under a PDF (a definite integral) is", "# Use plt.twinx() to overlay the CDF in the bottom subplot plt.twinx() #", "range, hide axes, add title and display plot plt.xlim((0,256)) plt.grid('off') plt.title('PDF & CDF", "to compute and display the CDF. Also, use alpha=0.4 and color='blue' to make", "Notice that plt.grid('off') switches off distracting grid lines. The command plt.twinx() allows two", "option cumulative=True permits viewing the CDF instead of the PDF. Notice that plt.grid('off')", "range=(0,256), normed=False, color='red', alpha=0.4) plt.grid('off') # Use plt.twinx() to overlay the CDF in", "plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image in top subplot using color map 'gray' plt.subplot(2,1,1) plt.imshow(image,", "plt.grid('off') # Use plt.twinx() to overlay the CDF in the bottom subplot plt.twinx()", "into an array: image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image in top subplot", "a grayscale image. You will use the grayscale image of Hawkes Bay, New", "the bottom subplot. Use the histogram options bins=64, range=(0,256), and normed=True. This time,", "Use the plotting options alpha=0.4 and color='red' to make the overlayed plots easier", "array: image image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg') # Display image in top subplot using color", "normed=True. This time, also use cumulative=True to compute and display the CDF. Also,", "plt.title('Original image') plt.axis('off') # Flatten the image into 1 dimension: pixels pixels =", "and pre-flattened into the 1D array pixels for you. The histogram option cumulative=True", "sharing the x-axis but with different scales on the y-axis. INSTRUCTIONS 70XP First,", "common horizontal axis. Third, call plt.hist() again to overlay the CDF in the", "CDF. Also, use alpha=0.4 and color='blue' to make the overlayed plots easier to", "in the bottom subplot plt.subplot(2,1,2) pdf = plt.hist(pixels, bins=64, range=(0,256), normed=False, color='red', alpha=0.4)", "use plt.hist() to plot the histogram of the 1-D array pixels in the", "plot the histogram of the 1-D array pixels in the bottom subplot. Use", "= plt.hist(pixels, bins=64, range=(0,256), cumulative=True, normed=True, color='blue', alpha=0.4) # Specify x-axis range, hide", "is sometimes called a Probability Distribution Function (or PDF). The area under a", "called a Cumulative Distribution Function (or CDF). The CDF quantifies the probability of", "# Display a histogram of the pixels in the bottom subplot plt.subplot(2,1,2) pdf", "bottom subplot plt.twinx() # Display a cumulative histogram of the pixels cdf =", "via Wikimedia Commons, CC BY 2.0). This time, the 2D array image will", "This time, the 2D array image will be pre-loaded and pre-flattened into the", "make the overlayed plots easier to see. Second, use plt.twinx() to overlay plots", "in the bottom subplot plt.twinx() # Display a cumulative histogram of the pixels", "pixels cdf = plt.hist(pixels, bins=64, range=(0,256), cumulative=True, normed=True, color='blue', alpha=0.4) # Specify x-axis", "array image will be pre-loaded and pre-flattened into the 1D array pixels for", "pre-loaded and pre-flattened into the 1D array pixels for you. The histogram option", "Use the histogram options bins=64, range=(0,256), and normed=False. 
Use the plotting options alpha=0.4", "= image.flatten() # Display a histogram of the pixels in the bottom subplot", "map 'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original image') plt.axis('off') # Flatten the image into", "2D array image will be pre-loaded and pre-flattened into the 1D array pixels", "range=(0,256), and normed=False. Use the plotting options alpha=0.4 and color='red' to make the", "that plt.grid('off') switches off distracting grid lines. The command plt.twinx() allows two plots", "scales on a common horizontal axis. Third, call plt.hist() again to overlay the", "time, the 2D array image will be pre-loaded and pre-flattened into the 1D", "top subplot using color map 'gray' plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original image') plt.axis('off') #", "(a definite integral) is called a Cumulative Distribution Function (or CDF). The CDF", "alpha=0.4 and color='red' to make the overlayed plots easier to see. Second, use", "INSTRUCTIONS 70XP First, use plt.hist() to plot the histogram of the 1-D array", "plt.subplot(2,1,2) pdf = plt.hist(pixels, bins=64, range=(0,256), normed=False, color='red', alpha=0.4) plt.grid('off') # Use plt.twinx()", "histogram option cumulative=True permits viewing the CDF instead of the PDF. Notice that", "an image histogram A histogram of a continuous random variable is sometimes called", "plt.twinx() allows two plots to be overlayed sharing the x-axis but with different", "on the y-axis. INSTRUCTIONS 70XP First, use plt.hist() to plot the histogram of", "grid lines. The command plt.twinx() allows two plots to be overlayed sharing the", "bottom subplot plt.subplot(2,1,2) pdf = plt.hist(pixels, bins=64, range=(0,256), normed=False, color='red', alpha=0.4) plt.grid('off') #", "plt.hist(pixels, bins=64, range=(0,256), normed=False, color='red', alpha=0.4) plt.grid('off') # Use plt.twinx() to overlay the", "the 1D array pixels for you. The histogram option cumulative=True permits viewing the", "with different vertical scales on a common horizontal axis. Third, call plt.hist() again", "Third, call plt.hist() again to overlay the CDF in the bottom subplot. Use", "# Display a cumulative histogram of the pixels cdf = plt.hist(pixels, bins=64, range=(0,256),", "vertical scales on a common horizontal axis. Third, call plt.hist() again to overlay", "easier to see. ''' # Load the image into an array: image image", "from an image histogram A histogram of a continuous random variable is sometimes", "plt.subplot(2,1,1) plt.imshow(image, cmap='gray') plt.title('Original image') plt.axis('off') # Flatten the image into 1 dimension:", "is called a Cumulative Distribution Function (or CDF). The CDF quantifies the probability", "dimension: pixels pixels = image.flatten() # Display a histogram of the pixels in", "display the CDF. Also, use alpha=0.4 and color='blue' to make the overlayed plots", "y-axis. INSTRUCTIONS 70XP First, use plt.hist() to plot the histogram of the 1-D", "<NAME>, modified by User:Konstable, via Wikimedia Commons, CC BY 2.0). This time, the", "easier to see. Second, use plt.twinx() to overlay plots with different vertical scales", "of the 1-D array pixels in the bottom subplot. Use the histogram options", "pixels for you. The histogram option cumulative=True permits viewing the CDF instead of", "the CDF in the bottom subplot. 
Use the histogram options bins=64, range=(0,256), and", "color='blue', alpha=0.4) # Specify x-axis range, hide axes, add title and display plot", "Cumulative Distribution Function from an image histogram A histogram of a continuous random", "Function (or CDF). The CDF quantifies the probability of observing certain pixel intensities.", "Commons, CC BY 2.0). This time, the 2D array image will be pre-loaded", "normed=False, color='red', alpha=0.4) plt.grid('off') # Use plt.twinx() to overlay the CDF in the", "into 1 dimension: pixels pixels = image.flatten() # Display a histogram of the" ]
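The exercise keeps the old normed= keyword, which was removed in matplotlib 3.1 in favor of density=. Below is a minimal sketch of the same PDF/CDF overlay on current matplotlib, assuming the same image file and using the explicit Axes API instead of the pyplot state machine:

import matplotlib.pyplot as plt

# Same overlay for matplotlib >= 3.1: PDF as raw counts on the left axis,
# CDF as a normalized cumulative histogram on a twinned right axis.
image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')  # same file as above
pixels = image.flatten()

fig, (ax_img, ax_pdf) = plt.subplots(2, 1)
ax_img.imshow(image, cmap='gray')
ax_img.set_title('Original image')
ax_img.axis('off')

ax_pdf.hist(pixels, bins=64, range=(0, 256), color='red', alpha=0.4)
ax_cdf = ax_pdf.twinx()  # shared x-axis, independent y-scale
ax_cdf.hist(pixels, bins=64, range=(0, 256), density=True,
            cumulative=True, color='blue', alpha=0.4)

ax_pdf.set_xlim((0, 256))
ax_pdf.set_title('PDF & CDF (original image)')
plt.show()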
[ "NFHead from .none import NoneHead from .pre_activation import PreActivationHead from .swin import SwinHead", ".mobilenet import MobileNetV2Head, MobileNetV3Head from .nfnet import NFHead from .none import NoneHead from", "NoneHead from .pre_activation import PreActivationHead from .swin import SwinHead from .vit import ViTHead", "from .nfnet import NFHead from .none import NoneHead from .pre_activation import PreActivationHead from", "import NoneHead from .pre_activation import PreActivationHead from .swin import SwinHead from .vit import", "<gh_stars>1-10 from .mobilenet import MobileNetV2Head, MobileNetV3Head from .nfnet import NFHead from .none import", "from .mobilenet import MobileNetV2Head, MobileNetV3Head from .nfnet import NFHead from .none import NoneHead", "import MobileNetV2Head, MobileNetV3Head from .nfnet import NFHead from .none import NoneHead from .pre_activation", "from .none import NoneHead from .pre_activation import PreActivationHead from .swin import SwinHead from", ".nfnet import NFHead from .none import NoneHead from .pre_activation import PreActivationHead from .swin", "import NFHead from .none import NoneHead from .pre_activation import PreActivationHead from .swin import", "MobileNetV2Head, MobileNetV3Head from .nfnet import NFHead from .none import NoneHead from .pre_activation import", ".none import NoneHead from .pre_activation import PreActivationHead from .swin import SwinHead from .vit", "MobileNetV3Head from .nfnet import NFHead from .none import NoneHead from .pre_activation import PreActivationHead" ]
[ "poop == \"randomNum\": print(\"This define prints a random number\") print(\"using random randint between", "used as a template on how to make a tester correctly import ce_tools", "Engine Tools Tester (CE_tools.py)\") print(\"This tools tests all functions up to\", ce_tools.ce_tools_ver) while", "random dice\") ce_tools.rollADice() elif poop == \"exit\": SystemExit() elif poop == \"Debug\": ce_tools.debugFound()", "== \"roll a dice\": print(\"The dice function rolls a random dice\") ce_tools.rollADice() elif", "between 0 to infinity\") ce_tools.randomNum() elif poop == \"request hello\": print(\"this program says", "says hi to you in different ways\") print(\"Check out cetHelp.txt for more info", "Engine Utillity \"CE_tools.py\" # This can also be used as a template on", "random randint between 0 to infinity\") ce_tools.randomNum() elif poop == \"request hello\": print(\"this", "tools tests all functions up to\", ce_tools.ce_tools_ver) while True: poop = input(\"Where would", "print(\"This tools tests all functions up to\", ce_tools.ce_tools_ver) while True: poop = input(\"Where", "ce_tools.randomNum() elif poop == \"request hello\": print(\"this program says hi to you in", "all functions up to\", ce_tools.ce_tools_ver) while True: poop = input(\"Where would you like", "dice\": print(\"The dice function rolls a random dice\") ce_tools.rollADice() elif poop == \"exit\":", "that\") ce_tools.RequestHello() elif poop == \"roll a dice\": print(\"The dice function rolls a", "SystemExit() elif poop == \"Debug\": ce_tools.debugFound() # exclusive for testers only else: print(ce_tools.wrongInputTester)", "elif poop == \"request hello\": print(\"this program says hi to you in different", "a dice\": print(\"The dice function rolls a random dice\") ce_tools.rollADice() elif poop ==", "tester correctly import ce_tools from ce_tools import debugFound, systemc systemc.system(\"cls\" if systemc.name=='nt' else", "infinity\") ce_tools.randomNum() elif poop == \"request hello\": print(\"this program says hi to you", "as a template on how to make a tester correctly import ce_tools from", "ce_tools.rollADice() elif poop == \"exit\": SystemExit() elif poop == \"Debug\": ce_tools.debugFound() # exclusive", "\"exit\": SystemExit() elif poop == \"Debug\": ce_tools.debugFound() # exclusive for testers only else:", "# exclusive for testers only else: print(ce_tools.wrongInputTester) # uses the text from ce_tools", "on that\") ce_tools.RequestHello() elif poop == \"roll a dice\": print(\"The dice function rolls", "hello\": print(\"this program says hi to you in different ways\") print(\"Check out cetHelp.txt", "\"randomNum\": print(\"This define prints a random number\") print(\"using random randint between 0 to", "ce_tools from ce_tools import debugFound, systemc systemc.system(\"cls\" if systemc.name=='nt' else 'clear') print(\"Corn Engine", "systemc systemc.system(\"cls\" if systemc.name=='nt' else 'clear') print(\"Corn Engine Tools Tester (CE_tools.py)\") print(\"This tools", "'clear') print(\"Corn Engine Tools Tester (CE_tools.py)\") print(\"This tools tests all functions up to\",", "poop == \"help\": systemc.startfile(ce_tools.help) elif poop == \"randomNum\": print(\"This define prints a random", "\"CE_tools.py\" # This can also be used as a template on how to", "be used as a template on how to make a tester correctly import", "print(\"The dice function rolls a random dice\") ce_tools.rollADice() elif poop == \"exit\": SystemExit()", "number\") print(\"using random randint between 0 to infinity\") 
ce_tools.randomNum() elif poop == \"request", "make a tester correctly import ce_tools from ce_tools import debugFound, systemc systemc.system(\"cls\" if", "input(\"Where would you like to go?: \") if poop == \"help\": systemc.startfile(ce_tools.help) elif", "Corn Engine Utillity \"CE_tools.py\" # This can also be used as a template", "prints a random number\") print(\"using random randint between 0 to infinity\") ce_tools.randomNum() elif", "TEST#################### ####################################################= # A functional tester for the famous Corn Engine Utillity \"CE_tools.py\"", "ce_tools import debugFound, systemc systemc.system(\"cls\" if systemc.name=='nt' else 'clear') print(\"Corn Engine Tools Tester", "= input(\"Where would you like to go?: \") if poop == \"help\": systemc.startfile(ce_tools.help)", "systemc.startfile(ce_tools.help) elif poop == \"randomNum\": print(\"This define prints a random number\") print(\"using random", "if systemc.name=='nt' else 'clear') print(\"Corn Engine Tools Tester (CE_tools.py)\") print(\"This tools tests all", "a random number\") print(\"using random randint between 0 to infinity\") ce_tools.randomNum() elif poop", "a random dice\") ce_tools.rollADice() elif poop == \"exit\": SystemExit() elif poop == \"Debug\":", "you in different ways\") print(\"Check out cetHelp.txt for more info on that\") ce_tools.RequestHello()", "for more info on that\") ce_tools.RequestHello() elif poop == \"roll a dice\": print(\"The", "Utillity \"CE_tools.py\" # This can also be used as a template on how", "elif poop == \"exit\": SystemExit() elif poop == \"Debug\": ce_tools.debugFound() # exclusive for", "import ce_tools from ce_tools import debugFound, systemc systemc.system(\"cls\" if systemc.name=='nt' else 'clear') print(\"Corn", "####################################################= ###################CE_TOOLS TEST#################### ####################################################= # A functional tester for the famous Corn Engine", "elif poop == \"randomNum\": print(\"This define prints a random number\") print(\"using random randint", "\"Debug\": ce_tools.debugFound() # exclusive for testers only else: print(ce_tools.wrongInputTester) # uses the text", "a tester correctly import ce_tools from ce_tools import debugFound, systemc systemc.system(\"cls\" if systemc.name=='nt'", "functional tester for the famous Corn Engine Utillity \"CE_tools.py\" # This can also", "== \"request hello\": print(\"this program says hi to you in different ways\") print(\"Check", "cetHelp.txt for more info on that\") ce_tools.RequestHello() elif poop == \"roll a dice\":", "random number\") print(\"using random randint between 0 to infinity\") ce_tools.randomNum() elif poop ==", "more info on that\") ce_tools.RequestHello() elif poop == \"roll a dice\": print(\"The dice", "print(\"this program says hi to you in different ways\") print(\"Check out cetHelp.txt for", "poop = input(\"Where would you like to go?: \") if poop == \"help\":", "to go?: \") if poop == \"help\": systemc.startfile(ce_tools.help) elif poop == \"randomNum\": print(\"This", "== \"randomNum\": print(\"This define prints a random number\") print(\"using random randint between 0", "poop == \"roll a dice\": print(\"The dice function rolls a random dice\") ce_tools.rollADice()", "elif poop == \"roll a dice\": print(\"The dice function rolls a random dice\")", "systemc.system(\"cls\" if systemc.name=='nt' else 'clear') print(\"Corn Engine Tools Tester (CE_tools.py)\") print(\"This tools tests", "like to go?: 
\") if poop == \"help\": systemc.startfile(ce_tools.help) elif poop == \"randomNum\":", "while True: poop = input(\"Where would you like to go?: \") if poop", "would you like to go?: \") if poop == \"help\": systemc.startfile(ce_tools.help) elif poop", "on how to make a tester correctly import ce_tools from ce_tools import debugFound,", "== \"help\": systemc.startfile(ce_tools.help) elif poop == \"randomNum\": print(\"This define prints a random number\")", "print(\"Corn Engine Tools Tester (CE_tools.py)\") print(\"This tools tests all functions up to\", ce_tools.ce_tools_ver)", "can also be used as a template on how to make a tester", "\") if poop == \"help\": systemc.startfile(ce_tools.help) elif poop == \"randomNum\": print(\"This define prints", "debugFound, systemc systemc.system(\"cls\" if systemc.name=='nt' else 'clear') print(\"Corn Engine Tools Tester (CE_tools.py)\") print(\"This", "to infinity\") ce_tools.randomNum() elif poop == \"request hello\": print(\"this program says hi to", "###################CE_TOOLS TEST#################### ####################################################= # A functional tester for the famous Corn Engine Utillity", "== \"exit\": SystemExit() elif poop == \"Debug\": ce_tools.debugFound() # exclusive for testers only", "# A functional tester for the famous Corn Engine Utillity \"CE_tools.py\" # This", "to make a tester correctly import ce_tools from ce_tools import debugFound, systemc systemc.system(\"cls\"", "if poop == \"help\": systemc.startfile(ce_tools.help) elif poop == \"randomNum\": print(\"This define prints a", "rolls a random dice\") ce_tools.rollADice() elif poop == \"exit\": SystemExit() elif poop ==", "A functional tester for the famous Corn Engine Utillity \"CE_tools.py\" # This can", "poop == \"Debug\": ce_tools.debugFound() # exclusive for testers only else: print(ce_tools.wrongInputTester) # uses", "poop == \"request hello\": print(\"this program says hi to you in different ways\")", "print(\"This define prints a random number\") print(\"using random randint between 0 to infinity\")", "== \"Debug\": ce_tools.debugFound() # exclusive for testers only else: print(ce_tools.wrongInputTester) # uses the", "tests all functions up to\", ce_tools.ce_tools_ver) while True: poop = input(\"Where would you", "also be used as a template on how to make a tester correctly", "Tester (CE_tools.py)\") print(\"This tools tests all functions up to\", ce_tools.ce_tools_ver) while True: poop", "out cetHelp.txt for more info on that\") ce_tools.RequestHello() elif poop == \"roll a", "ce_tools.debugFound() # exclusive for testers only else: print(ce_tools.wrongInputTester) # uses the text from", "else 'clear') print(\"Corn Engine Tools Tester (CE_tools.py)\") print(\"This tools tests all functions up", "a template on how to make a tester correctly import ce_tools from ce_tools", "info on that\") ce_tools.RequestHello() elif poop == \"roll a dice\": print(\"The dice function", "to you in different ways\") print(\"Check out cetHelp.txt for more info on that\")", "0 to infinity\") ce_tools.randomNum() elif poop == \"request hello\": print(\"this program says hi", "correctly import ce_tools from ce_tools import debugFound, systemc systemc.system(\"cls\" if systemc.name=='nt' else 'clear')", "True: poop = input(\"Where would you like to go?: \") if poop ==", "print(\"using random randint between 0 to infinity\") ce_tools.randomNum() elif poop == \"request hello\":", "ways\") print(\"Check out cetHelp.txt for more info on that\") ce_tools.RequestHello() elif 
poop ==", "template on how to make a tester correctly import ce_tools from ce_tools import", "the famous Corn Engine Utillity \"CE_tools.py\" # This can also be used as", "This can also be used as a template on how to make a", "function rolls a random dice\") ce_tools.rollADice() elif poop == \"exit\": SystemExit() elif poop", "####################################################= # A functional tester for the famous Corn Engine Utillity \"CE_tools.py\" #", "to\", ce_tools.ce_tools_ver) while True: poop = input(\"Where would you like to go?: \")", "different ways\") print(\"Check out cetHelp.txt for more info on that\") ce_tools.RequestHello() elif poop", "up to\", ce_tools.ce_tools_ver) while True: poop = input(\"Where would you like to go?:", "go?: \") if poop == \"help\": systemc.startfile(ce_tools.help) elif poop == \"randomNum\": print(\"This define", "dice function rolls a random dice\") ce_tools.rollADice() elif poop == \"exit\": SystemExit() elif", "systemc.name=='nt' else 'clear') print(\"Corn Engine Tools Tester (CE_tools.py)\") print(\"This tools tests all functions", "from ce_tools import debugFound, systemc systemc.system(\"cls\" if systemc.name=='nt' else 'clear') print(\"Corn Engine Tools", "dice\") ce_tools.rollADice() elif poop == \"exit\": SystemExit() elif poop == \"Debug\": ce_tools.debugFound() #", "you like to go?: \") if poop == \"help\": systemc.startfile(ce_tools.help) elif poop ==", "elif poop == \"Debug\": ce_tools.debugFound() # exclusive for testers only else: print(ce_tools.wrongInputTester) #", "\"help\": systemc.startfile(ce_tools.help) elif poop == \"randomNum\": print(\"This define prints a random number\") print(\"using", "\"roll a dice\": print(\"The dice function rolls a random dice\") ce_tools.rollADice() elif poop", "ce_tools.RequestHello() elif poop == \"roll a dice\": print(\"The dice function rolls a random", "hi to you in different ways\") print(\"Check out cetHelp.txt for more info on", "# This can also be used as a template on how to make", "Tools Tester (CE_tools.py)\") print(\"This tools tests all functions up to\", ce_tools.ce_tools_ver) while True:", "import debugFound, systemc systemc.system(\"cls\" if systemc.name=='nt' else 'clear') print(\"Corn Engine Tools Tester (CE_tools.py)\")", "functions up to\", ce_tools.ce_tools_ver) while True: poop = input(\"Where would you like to", "print(\"Check out cetHelp.txt for more info on that\") ce_tools.RequestHello() elif poop == \"roll", "for the famous Corn Engine Utillity \"CE_tools.py\" # This can also be used", "ce_tools.ce_tools_ver) while True: poop = input(\"Where would you like to go?: \") if", "tester for the famous Corn Engine Utillity \"CE_tools.py\" # This can also be", "randint between 0 to infinity\") ce_tools.randomNum() elif poop == \"request hello\": print(\"this program", "(CE_tools.py)\") print(\"This tools tests all functions up to\", ce_tools.ce_tools_ver) while True: poop =", "how to make a tester correctly import ce_tools from ce_tools import debugFound, systemc", "poop == \"exit\": SystemExit() elif poop == \"Debug\": ce_tools.debugFound() # exclusive for testers", "famous Corn Engine Utillity \"CE_tools.py\" # This can also be used as a", "define prints a random number\") print(\"using random randint between 0 to infinity\") ce_tools.randomNum()", "in different ways\") print(\"Check out cetHelp.txt for more info on that\") ce_tools.RequestHello() elif", "program says hi to you in different ways\") print(\"Check out cetHelp.txt for more", "\"request hello\": print(\"this 
program says hi to you in different ways\") print(\"Check out" ]
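Since the file bills itself as a template for testers, here is a minimal sketch of the same menu as a dict dispatch table instead of an if/elif chain. The ce_tools attribute names are taken from the tester above; the shortened description strings and the COMMANDS structure are illustrative:

import ce_tools
from ce_tools import systemc

# Each command maps to (description, zero-argument action).
COMMANDS = {
    "help":          ("open the help file",       lambda: systemc.startfile(ce_tools.help)),
    "randomNum":     ("print a random number",    ce_tools.randomNum),
    "request hello": ("say hi in different ways", ce_tools.RequestHello),
    "roll a dice":   ("roll a random dice",       ce_tools.rollADice),
    "Debug":         ("testers only",             ce_tools.debugFound),
}

while True:
    choice = input("Where would you like to go?: ")
    if choice == "exit":
        break
    entry = COMMANDS.get(choice)
    if entry is None:
        print(ce_tools.wrongInputTester)
        continue
    description, action = entry
    print(description)
    action()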
[ "+ \"/output\") f = open(v + '/output','r') o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w') nodeMap = {}", "KIND, either express or implied. # See the License for the specific language", "2016 Sotera Defense Solutions Inc. # # Licensed under the Apache License, Version", "Unless required by applicable law or agreed to in writing, software # distributed", "= open(v + '/output','r') o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w') nodeMap = {} for line in", "os.system(\"cat \" + v + \"/part-* > \" + v + \"/output\") f", "+ table + \"/output/graphx/comm_1\", stdout=garbage, shell=True) call(\"hadoop fs -put louvain_to_gephi/graphx/community_itr_1.nodes /tmp/trackcomms/\" + table", "louvain_to_gephi/graphx/community_itr_\" + level + \".nodes /tmp/trackcomms/\" + table + \"/output/graphx/comm_\" + level, stdout=garbage,", "f.close() o.close() call(\"hadoop fs -mkdir /tmp/trackcomms/\" + table + \"/output/graphx/comm_1\", stdout=garbage, shell=True) call(\"hadoop", "this file except in compliance with the License. # You may obtain a", "while os.path.exists(e): os.system(\"cat \" + v + \"/part-* > \" + v +", "\" + v + \"/output\") f = open(v + '/output','r') o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w')", "+ \"/output/graphx/comm_\" + level, stdout=garbage, shell=True) f = open(e + '/output','r') o =", "re.search(r'Edge\\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\\)', line) o.write('\\t'.join((nodeMap[match.group(1)],nodeMap[match.group(2)], match.group(3))) + '\\n') o.close() f.close() i = i + 1", "for the specific language governing permissions and # limitations under the License. #!/usr/bin/env", "open('louvain_to_gephi/graphx/community_itr_' + level + '.nodes','w') for line in f: id = re.search(r'\\(([a-zA-Z0-9]+)', line).group(1)", "#!/usr/bin/env python import os import re import sys from subprocess import call table", "ANY KIND, either express or implied. # See the License for the specific", "= 'output/graphx/level_'+str(i)+'_edges' while os.path.exists(e): os.system(\"cat \" + v + \"/part-* > \" +", "/tmp/trackcomms/\" + table + \"/output/graphx/comm_\" + level, stdout=garbage, shell=True) call(\"hadoop fs -put louvain_to_gephi/graphx/community_itr_\"", "\"/output\") level = str(i+1) f = open(v + '/output','r') o = open('louvain_to_gephi/graphx/community_itr_' +", "o.write('\\t'.join((nodeMap[match.group(1)],nodeMap[match.group(2)], match.group(3))) + '\\n') o.close() f.close() i = i + 1 v =", "under the License. #!/usr/bin/env python import os import re import sys from subprocess", "in f: if len(line.split('\\t')) == 3: source,weight,edgelist = line.split('\\t') edgelist = edgelist.strip().split(',') for", "stdout=garbage, shell=True) call(\"hadoop fs -put louvain_to_gephi/graphx/community_itr_\" + level + \".nodes /tmp/trackcomms/\" + table", "call(\"hadoop fs -put louvain_to_gephi/graphx/community_itr_\" + level + \".nodes /tmp/trackcomms/\" + table + \"/output/graphx/comm_\"", "\"/output/graphx/comm_\" + level, stdout=garbage, shell=True) call(\"hadoop fs -put louvain_to_gephi/graphx/community_itr_\" + level + \".nodes", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "-mkdir /tmp/trackcomms/\" + table + \"/output/graphx/comm_\" + level, stdout=garbage, shell=True) call(\"hadoop fs -put", "'/output','r') o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w') nodeMap = {} for line in f: id =", "'\\n') f.close() o.close() call(\"hadoop fs -mkdir /tmp/trackcomms/\" + table + \"/output/graphx/comm_1\", stdout=garbage, shell=True)", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "OF ANY KIND, either express or implied. # See the License for the", "o.close() f.close() # Here's the looping piece i = 1 v = 'output/graphx/level_'+str(i)+'_vertices'", "License. #!/usr/bin/env python import os import re import sys from subprocess import call", "governing permissions and # limitations under the License. #!/usr/bin/env python import os import", "> \" + e + \"/output\") level = str(i+1) f = open(v +", "= edgelist.strip().split(',') for e in edgelist: o.write('\\t'.join((source,e.split(':')[0],e.split(':')[1])) + '\\n') o.close() f.close() # Here's", "> \" + v + \"/output\") os.system(\"cat \" + e + \"/part-* >", "+ '.edges','w') for line in f: match = re.search(r'Edge\\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\\)', line) o.write('\\t'.join((nodeMap[match.group(1)],nodeMap[match.group(2)], match.group(3))) +", "+ \"/output\") os.system(\"cat \" + e + \"/part-* > \" + e +", "f = open(v + '/output','r') o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w') nodeMap = {} for line", "f = open(v + '/output','r') o = open('louvain_to_gephi/graphx/community_itr_' + level + '.nodes','w') for", "shell=True) call(\"hadoop fs -put louvain_to_gephi/graphx/community_itr_1.nodes /tmp/trackcomms/\" + table + \"/output/graphx/comm_1\", stdout=garbage, shell=True) f", "\"/output/graphx/comm_1\", stdout=garbage, shell=True) f = open('edgelist.tsv','r') o = open('louvain_to_gephi/graphx/graph_itr_0.edges','w') for line in f:", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "for line in f: if len(line.split('\\t')) == 3: source,weight,edgelist = line.split('\\t') edgelist =", "open('louvain_to_gephi/graphx/graph_itr_' + str(i) + '.edges','w') for line in f: match = re.search(r'Edge\\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\\)', line)", "import os import re import sys from subprocess import call table = sys.argv[1]", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "\"/output\") f = open(v + '/output','r') o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w') nodeMap = {} for", "open('edgelist.tsv','r') o = open('louvain_to_gephi/graphx/graph_itr_0.edges','w') for line in f: if len(line.split('\\t')) == 3: source,weight,edgelist", "v + \"/part-* > \" + v + \"/output\") os.system(\"cat \" + e", "e + \"/output\") level = str(i+1) f = open(v + '/output','r') o =", "f.close() # Here's the looping piece i = 1 v = 'output/graphx/level_'+str(i)+'_vertices' e", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "'.nodes','w') for line in f: id = re.search(r'\\(([a-zA-Z0-9]+)', line).group(1) name = re.search(r'(name):([a-zA-Z0-9\\-]+)', line).group(2)", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "{} for line in f: id = re.search(r'\\(([a-zA-Z0-9]+)', line).group(1) name = re.search(r'(name):([a-zA-Z0-9\\-]+)', line).group(2)", "open('louvain_to_gephi/graphx/graph_itr_0.edges','w') for line in f: if len(line.split('\\t')) == 3: source,weight,edgelist = 
line.split('\\t') edgelist", "nodeMap = {} for line in f: id = re.search(r'\\(([a-zA-Z0-9]+)', line).group(1) name =", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in f: match = re.search(r'Edge\\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\\)', line) o.write('\\t'.join((nodeMap[match.group(1)],nodeMap[match.group(2)], match.group(3))) + '\\n') o.close() f.close() i", "= 'output/graphx/level_'+str(i)+'_vertices' e = 'output/graphx/level_'+str(i)+'_edges' while os.path.exists(e): os.system(\"cat \" + v + \"/part-*", "+ table + \"/output/graphx/comm_\" + level, stdout=garbage, shell=True) f = open(e + '/output','r')", "+ '\\n') f.close() o.close() call(\"hadoop fs -mkdir /tmp/trackcomms/\" + table + \"/output/graphx/comm_1\", stdout=garbage,", "required by applicable law or agreed to in writing, software # distributed under", "'/output','r') o = open('louvain_to_gephi/graphx/community_itr_' + level + '.nodes','w') for line in f: id", "f: if len(line.split('\\t')) == 3: source,weight,edgelist = line.split('\\t') edgelist = edgelist.strip().split(',') for e", "applicable law or agreed to in writing, software # distributed under the License", "i = 1 v = 'output/graphx/level_'+str(i)+'_vertices' e = 'output/graphx/level_'+str(i)+'_edges' while os.path.exists(e): os.system(\"cat \"", "line).group(2) nodeMap[id] = name o.write(name + '\\t' + comm + '\\n') f.close() o.close()", "+ v + \"/part-* > \" + v + \"/output\") f = open(v", "open(v + '/output','r') o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w') nodeMap = {} for line in f:", "or agreed to in writing, software # distributed under the License is distributed", "for line in f: match = re.search(r'Edge\\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\\)', line) o.write('\\t'.join((nodeMap[match.group(1)],nodeMap[match.group(2)], match.group(3))) + '\\n') o.close()", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "v = 'output/graphx/level_'+str(i)+'_vertices' e = 'output/graphx/level_'+str(i)+'_edges' while os.path.exists(e): os.system(\"cat \" + v +", "3: source,weight,edgelist = line.split('\\t') edgelist = edgelist.strip().split(',') for e in edgelist: o.write('\\t'.join((source,e.split(':')[0],e.split(':')[1])) +", "\" + v + \"/part-* > \" + v + \"/output\") os.system(\"cat \"", "= re.search(r'(name):([a-zA-Z0-9\\-]+)', line).group(2) comm = re.search(r'(communityName):([a-zA-Z0-9\\-]+)', line).group(2) nodeMap[id] = name o.write(name + '\\t'", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "id = re.search(r'\\(([a-zA-Z0-9]+)', line).group(1) name = re.search(r'(name):([a-zA-Z0-9\\-]+)', line).group(2) comm = re.search(r'(communityName):([a-zA-Z0-9\\-]+)', line).group(2) nodeMap[id]", "level + \".nodes /tmp/trackcomms/\" + table + \"/output/graphx/comm_\" + level, stdout=garbage, shell=True) f", "+ '/output','r') o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w') nodeMap = {} for line in f: id", "o.write('\\t'.join((source,e.split(':')[0],e.split(':')[1])) + '\\n') o.close() f.close() # Here's the looping piece i = 1", "stdout=garbage, shell=True) f = open('edgelist.tsv','r') o = open('louvain_to_gephi/graphx/graph_itr_0.edges','w') for line in f: if", "f: match = re.search(r'Edge\\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\\)', line) o.write('\\t'.join((nodeMap[match.group(1)],nodeMap[match.group(2)], match.group(3))) + '\\n') o.close() f.close() i =", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "/tmp/trackcomms/\" + table + \"/output/graphx/comm_1\", stdout=garbage, shell=True) f = open('edgelist.tsv','r') o = open('louvain_to_gephi/graphx/graph_itr_0.edges','w')", "License. # You may obtain a copy of the License at # #", "= open('louvain_to_gephi/graphx/community_itr_' + level + '.nodes','w') for line in f: id = re.search(r'\\(([a-zA-Z0-9]+)',", "+ \"/part-* > \" + v + \"/output\") f = open(v + '/output','r')", "= name o.write(name + '\\t' + comm + '\\n') f.close() o.close() call(\"hadoop fs", "compliance with the License. # You may obtain a copy of the License", "specific language governing permissions and # limitations under the License. #!/usr/bin/env python import", "Here's the looping piece i = 1 v = 'output/graphx/level_'+str(i)+'_vertices' e = 'output/graphx/level_'+str(i)+'_edges'", "Copyright 2016 Sotera Defense Solutions Inc. # # Licensed under the Apache License,", "\" + v + \"/output\") os.system(\"cat \" + e + \"/part-* > \"", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "open(e + '/output','r') o = open('louvain_to_gephi/graphx/graph_itr_' + str(i) + '.edges','w') for line in", "# Copyright 2016 Sotera Defense Solutions Inc. # # Licensed under the Apache", "open(v + '/output','r') o = open('louvain_to_gephi/graphx/community_itr_' + level + '.nodes','w') for line in", "not use this file except in compliance with the License. # You may", "Solutions Inc. 
#!/usr/bin/env python
# Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from subprocess import call

table = sys.argv[1]
garbage = open("garbage.out", "w")

# Level 0: merge the GraphX vertex part-files, then emit one "name <tab> community"
# line per vertex while remembering id -> name for later edge translation.
v = 'output/graphx/level_0_vertices'
os.system("cat " + v + "/part-* > " + v + "/output")
f = open(v + '/output', 'r')
o = open('louvain_to_gephi/graphx/community_itr_1.nodes', 'w')
nodeMap = {}
for line in f:
    id = re.search(r'\(([a-zA-Z0-9]+)', line).group(1)
    name = re.search(r'(name):([a-zA-Z0-9\-]+)', line).group(2)
    comm = re.search(r'(communityName):([a-zA-Z0-9\-]+)', line).group(2)
    nodeMap[id] = name
    o.write(name + '\t' + comm + '\n')
f.close()
o.close()
call("hadoop fs -mkdir /tmp/trackcomms/" + table + "/output/graphx/comm_1", stdout=garbage, shell=True)
call("hadoop fs -put louvain_to_gephi/graphx/community_itr_1.nodes /tmp/trackcomms/" + table + "/output/graphx/comm_1", stdout=garbage, shell=True)

# Expand the adjacency-list edge file into one source/target/weight triple per line.
f = open('edgelist.tsv', 'r')
o = open('louvain_to_gephi/graphx/graph_itr_0.edges', 'w')
for line in f:
    if len(line.split('\t')) == 3:
        source, weight, edgelist = line.split('\t')
        edgelist = edgelist.strip().split(',')
        for e in edgelist:
            o.write('\t'.join((source, e.split(':')[0], e.split(':')[1])) + '\n')
o.close()
f.close()

# Here's the looping piece
i = 1
v = 'output/graphx/level_' + str(i) + '_vertices'
e = 'output/graphx/level_' + str(i) + '_edges'
while os.path.exists(e):
    os.system("cat " + v + "/part-* > " + v + "/output")
    os.system("cat " + e + "/part-* > " + e + "/output")
    level = str(i + 1)
    f = open(v + '/output', 'r')
    o = open('louvain_to_gephi/graphx/community_itr_' + level + '.nodes', 'w')
    for line in f:
        id = re.search(r'\(([a-zA-Z0-9]+)', line).group(1)
        name = re.search(r'(name):([a-zA-Z0-9\-]+)', line).group(2)
        comm = re.search(r'(communityName):([a-zA-Z0-9\-]+)', line).group(2)
        nodeMap[id] = name
        o.write(name + '\t' + comm + '\n')
    f.close()
    o.close()
    call("hadoop fs -mkdir /tmp/trackcomms/" + table + "/output/graphx/comm_" + level, stdout=garbage, shell=True)
    call("hadoop fs -put louvain_to_gephi/graphx/community_itr_" + level + ".nodes /tmp/trackcomms/" + table + "/output/graphx/comm_" + level, stdout=garbage, shell=True)
    f = open(e + '/output', 'r')
    o = open('louvain_to_gephi/graphx/graph_itr_' + str(i) + '.edges', 'w')
    for line in f:
        match = re.search(r'Edge\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\)', line)
        o.write('\t'.join((nodeMap[match.group(1)], nodeMap[match.group(2)], match.group(3))) + '\n')
    o.close()
    f.close()
    i = i + 1
    v = 'output/graphx/level_' + str(i) + '_vertices'
    e = 'output/graphx/level_' + str(i) + '_edges'
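For reference, a minimal sketch of the vertex-parsing step above in isolation. The sample line is a hypothetical string shaped only to satisfy the three regexes; the real GraphX Louvain vertex dump may format its lines differently:

import re

# Hypothetical vertex line; '42', 'node-a' and 'comm-7' are made-up values.
sample = '(42,{name:node-a,communityName:comm-7})'

vid  = re.search(r'\(([a-zA-Z0-9]+)', sample).group(1)                  # '42'
name = re.search(r'(name):([a-zA-Z0-9\-]+)', sample).group(2)           # 'node-a'
comm = re.search(r'(communityName):([a-zA-Z0-9\-]+)', sample).group(2)  # 'comm-7'
print(vid, name, comm)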
[ "through='Follow', related_name='subscriber', blank=True) grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True) exp = models.IntegerField(default=0) bio", "models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True) def __str__(self): return self.user.username class Profile(models.Model): user = models.OneToOneField(User,", "grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True) exp = models.IntegerField(default=0) bio = models.TextField(max_length=500, blank=True)", "def total_likes(self): return self.likes.count() def save(self, *args, **kwargs): try: this = Post.objects.get(id=self.id) if", "'sponsor-ff69b4', 'partner' : 'partner-blueviolet', 'master' : 'master-purple' } def randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ'", "\"\"\" \"\"\" class Request(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete =", "self.avatar: this.avatar.delete(save=False) except: pass super(Profile, self).save(*args, **kwargs) class Follow(models.Model): class Meta: db_table =", "related_name='postlist', blank=True) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.name def get_absolute_url(self): return reverse('series_list',", "self.title def get_absolute_url(self): return reverse('post_detail', args=[self.author, self.url]) def total_likes(self): return self.likes.count() def save(self,", "'blogger-gray', 'contributor' : 'contributor-green', 'supporter' : 'supporter-orange', 'sponsor' : 'sponsor-ff69b4', 'partner' : 'partner-blueviolet',", "import models from django.contrib.auth.models import User from django.template.loader import render_to_string from django.urls import", "on_delete=models.CASCADE) agree_email = models.BooleanField(default=False) agree_history = models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True)", "null=True) def __str__(self): return self.user.username class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) subscriber =", "def save(self, *args, **kwargs): try: this = Profile.objects.get(id=self.id) if this.avatar != self.avatar: this.avatar.delete(save=False)", "res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return res.text def avatar_path(instance, filename): dt = datetime.datetime.now() return", "models.OneToOneField(User, on_delete=models.CASCADE) subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True) grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True,", "blank=True) grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True) exp = models.IntegerField(default=0) bio = models.TextField(max_length=500,", "related_name='likes', blank=True) tag = TagField() created_date = models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now) def __str__(self):", "return self.text class Notify(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey(Post, on_delete=models.CASCADE) is_read", "= True post = models.ForeignKey(Post, on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now)", "total_subscriber(self): return 
self.subscriber.count() def save(self, *args, **kwargs): try: this = Profile.objects.get(id=self.id) if this.avatar", "import timezone from tagging.fields import TagField font_mapping = { 'Noto Sans' : 'noto',", "= models.CharField(max_length=100, blank=True) def __str__(self): return self.user.username def total_subscriber(self): return self.subscriber.count() def save(self,", "return self.user.username class Grade(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class", "class Font(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Theme(models.Model): color", "pass super(Profile, self).save(*args, **kwargs) class Follow(models.Model): class Meta: db_table = 'board_user_follow' auto_created =", "models.CharField(max_length=15, unique=True) owner = models.ForeignKey('auth.User') member = models.ManyToManyField(User, related_name='members', blank=True) bio = models.TextField(max_length=500,", "random from django.db import models from django.contrib.auth.models import User from django.template.loader import render_to_string", "self.user.username def total_subscriber(self): return self.subscriber.count() def save(self, *args, **kwargs): try: this = Profile.objects.get(id=self.id)", "Comment(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE) text", "unique=True, allow_unicode=True) posts = models.ManyToManyField(Post, related_name='postlist', blank=True) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return", "models.TextField(max_length=300) edit = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.text class Notify(models.Model):", "return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post, infomation): new_notify = Notify(user=user, post=post, infomation=infomation)", "Follow(models.Model): class Meta: db_table = 'board_user_follow' auto_created = True following = models.ForeignKey(Profile, on_delete=models.CASCADE)", "= models.SlugField(max_length=50, unique=True, allow_unicode=True) posts = models.ManyToManyField(Post, related_name='postlist', blank=True) created_date = models.DateTimeField(default=timezone.now) def", "+ randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance, filename): dt = datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1]", "def __str__(self): return self.text class Notify(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey(Post,", "text_html = models.TextField() trendy = models.IntegerField(default=0) view_cnt = models.IntegerField(default=0) hide = models.BooleanField(default=False) notice", "models.CharField(max_length=100, blank=True) def __str__(self): return self.user.username def total_subscriber(self): return self.subscriber.count() def save(self, *args,", "infomation = models.TextField() created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.infomation class Series(models.Model): owner", "models.CharField(max_length=15, blank=True) homepage = models.CharField(max_length=100, blank=True) def 
__str__(self): return self.user.username def total_subscriber(self): return", "'Dark Mode' : 'dark', 'Violet' : 'purple', 'Green & Blue' : 'glue' }", "<reponame>woongchoi84/BLEX import requests import datetime import random from django.db import models from django.contrib.auth.models", "import TagField font_mapping = { 'Noto Sans' : 'noto', 'RIDIBatang' : 'ridi', 'Noto", "- 1 result = '' for i in range(length): result += rstr[random.randint(0, rstr_len)]", "TeamPost(models.Model): pass class TeamCategory(models.Model): pass \"\"\" \"\"\" class Request(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE)", "models.CharField(max_length=15, blank=True) twitter = models.CharField(max_length=15, blank=True) youtube = models.CharField(max_length=30, blank=True) facebook = models.CharField(max_length=30,", "from django.template.loader import render_to_string from django.urls import reverse from django.utils import timezone from", "self.name class Theme(models.Model): color = models.CharField(max_length=30, unique=True) def __str__(self): return self.color class Config(models.Model):", "from django.contrib.auth.models import User from django.template.loader import render_to_string from django.urls import reverse from", "agree_history = models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True) post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE,", "django.db import models from django.contrib.auth.models import User from django.template.loader import render_to_string from django.urls", "= models.BooleanField(default=False) agree_history = models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True) post_theme =", "\"\"\" class Request(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE)", "= models.ForeignKey(Profile, on_delete=models.CASCADE) follower = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return", "models.CharField(max_length=30, blank=True) facebook = models.CharField(max_length=30, blank=True) instagram = models.CharField(max_length=15, blank=True) homepage = models.CharField(max_length=100,", "self.post.title class Comment(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', related_name='comments', on_delete =", "+'.'+filename.split('.')[-1] def title_image_path(instance, filename): dt = datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def create_notify(user,", "models.CharField(max_length=30, unique=True) def __str__(self): return self.color class Config(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) agree_email", "tagging.fields import TagField font_mapping = { 'Noto Sans' : 'noto', 'RIDIBatang' : 'ridi',", "avatar = models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model): pass class TeamCategory(models.Model): pass \"\"\" \"\"\" class", "= models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Post(models.Model): author", "return self.infomation class Series(models.Model): owner = 
models.ForeignKey('auth.User', on_delete=models.CASCADE) name = models.CharField(max_length=50, unique=True) url", "class Team(models.Model): name = models.CharField(max_length=15, unique=True) owner = models.ForeignKey('auth.User') member = models.ManyToManyField(User, related_name='members',", "from django.db import models from django.contrib.auth.models import User from django.template.loader import render_to_string from", "related_name='members', blank=True) bio = models.TextField(max_length=500, blank=True) about = models.TextField(blank=True) avatar = models.ImageField(blank=True, upload_to=team_logo_path)", "class Grade(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Font(models.Model): name", "'board_user_follow' auto_created = True following = models.ForeignKey(Profile, on_delete=models.CASCADE) follower = models.ForeignKey(User, on_delete=models.CASCADE) created_date", "post = models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE) text = models.TextField(max_length=300) edit = models.BooleanField(default=False)", "= True following = models.ForeignKey(Profile, on_delete=models.CASCADE) follower = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now)", "allow_unicode=True) posts = models.ManyToManyField(Post, related_name='postlist', blank=True) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.name", "self).save(*args, **kwargs) class Follow(models.Model): class Meta: db_table = 'board_user_follow' auto_created = True following", "unique=True) def __str__(self): return self.name class Font(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self):", "= 'board_post_likes' auto_created = True post = models.ForeignKey(Post, on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE)", "'Default' : '', 'Dark Mode' : 'dark', 'Violet' : 'purple', 'Green & Blue'", "= models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class MiddleComment(models.Model): pass \"\"\" class History(models.Model):", "created_date = models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class MiddleComment(models.Model): pass \"\"\" class History(models.Model): user =", "= models.ForeignKey('auth.User', on_delete=models.CASCADE) title = models.CharField(max_length=50) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) image =", "'ridi', 'Noto Sans Serif' : 'serif' } theme_mapping = { 'Default' : '',", "blank=True) bio = models.TextField(max_length=500, blank=True) about = models.TextField(blank=True) avatar = models.ImageField(blank=True, upload_to=team_logo_path) class", "range(length): result += rstr[random.randint(0, rstr_len)] return result def parsedown(text): data = {'md': text.encode('utf-8')}", "result += rstr[random.randint(0, rstr_len)] return result def parsedown(text): data = {'md': text.encode('utf-8')} res", "models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self):", "models.OneToOneField(User, on_delete=models.CASCADE) agree_email = models.BooleanField(default=False) agree_history = models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True,", "in range(length): result += 
rstr[random.randint(0, rstr_len)] return result def parsedown(text): data = {'md':", "django.urls import reverse from django.utils import timezone from tagging.fields import TagField font_mapping =", "randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr) - 1 result = '' for", "return self.likes.count() def save(self, *args, **kwargs): try: this = Post.objects.get(id=self.id) if this.image !=", "pass class TeamCategory(models.Model): pass \"\"\" \"\"\" class Request(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post", "title_image_path(instance, filename): dt = datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post, infomation):", "= models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def", "**kwargs) class Follow(models.Model): class Meta: db_table = 'board_user_follow' auto_created = True following =", "django.contrib.auth.models import User from django.template.loader import render_to_string from django.urls import reverse from django.utils", "= models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE) content = models.TextField(blank=True) is_apply = models.BooleanField(default=False) created_date", "{ 'Noto Sans' : 'noto', 'RIDIBatang' : 'ridi', 'Noto Sans Serif' : 'serif'", "} grade_mapping = { 'blogger' : 'blogger-gray', 'contributor' : 'contributor-green', 'supporter' : 'supporter-orange',", "models.TextField(max_length=500, blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15, blank=True) twitter = models.CharField(max_length=15, blank=True)", "= models.ForeignKey('auth.User', on_delete=models.CASCADE) name = models.CharField(max_length=50, unique=True) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) posts", "user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) comment = models.ForeignKey('board.Comment',", "class TeamPost(models.Model): pass class TeamCategory(models.Model): pass \"\"\" \"\"\" class Request(models.Model): user = models.ForeignKey('auth.User',", "class Notify(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey(Post, on_delete=models.CASCADE) is_read = models.BooleanField(default=False)", "} theme_mapping = { 'Default' : '', 'Dark Mode' : 'dark', 'Violet' :", "owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) name = models.CharField(max_length=50, unique=True) url = models.SlugField(max_length=50, unique=True, allow_unicode=True)", "models.SlugField(max_length=50, unique=True, allow_unicode=True) posts = models.ManyToManyField(Post, related_name='postlist', blank=True) created_date = models.DateTimeField(default=timezone.now) def __str__(self):", "infomation=infomation) new_notify.save() \"\"\" class Team(models.Model): name = models.CharField(max_length=15, unique=True) owner = models.ForeignKey('auth.User') member", "!= self.image: this.image.delete(save=False) except: pass super(Post, self).save(*args, **kwargs) class PostLikes(models.Model): class Meta: db_table", "render_to_string from django.urls import reverse from django.utils import timezone 
from tagging.fields import TagField", "Series(models.Model): owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) name = models.CharField(max_length=50, unique=True) url = models.SlugField(max_length=50, unique=True,", "= models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True) grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True) exp =", "exp = models.IntegerField(default=0) bio = models.TextField(max_length=500, blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15,", "= Notify(user=user, post=post, infomation=infomation) new_notify.save() \"\"\" class Team(models.Model): name = models.CharField(max_length=15, unique=True) owner", "self.post.title class Post(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) title = models.CharField(max_length=50) url = models.SlugField(max_length=50,", "models.CASCADE) comment = models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE) content = models.TextField(blank=True) is_apply =", "this.image.delete(save=False) except: pass super(Post, self).save(*args, **kwargs) class PostLikes(models.Model): class Meta: db_table = 'board_post_likes'", "user = models.OneToOneField(User, on_delete=models.CASCADE) agree_email = models.BooleanField(default=False) agree_history = models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font',", "class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True) grade", "this.avatar != self.avatar: this.avatar.delete(save=False) except: pass super(Profile, self).save(*args, **kwargs) class Follow(models.Model): class Meta:", "'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance, filename): dt = datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour)", "on_delete = models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.user.username class Grade(models.Model): name", "on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Post(models.Model): author = models.ForeignKey('auth.User',", "data=data) return res.text def avatar_path(instance, filename): dt = datetime.datetime.now() return 'avatar/u/'+instance.user.username+ '/' +", "through='PostLikes', related_name='likes', blank=True) tag = TagField() created_date = models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now) def", "on_delete=models.CASCADE) post = models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE) text = models.TextField(max_length=300) edit =", ": 'partner-blueviolet', 'master' : 'master-purple' } def randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len =", "def __str__(self): return self.name class Font(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return", "models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True) exp = models.IntegerField(default=0) bio = models.TextField(max_length=500, blank=True) avatar =", "models.ManyToManyField(User, related_name='members', blank=True) bio = 
models.TextField(max_length=500, blank=True) about = models.TextField(blank=True) avatar = models.ImageField(blank=True,", "'blogger' : 'blogger-gray', 'contributor' : 'contributor-green', 'supporter' : 'supporter-orange', 'sponsor' : 'sponsor-ff69b4', 'partner'", "datetime.datetime.now() return 'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance, filename): dt = datetime.datetime.now()", "True following = models.ForeignKey(Profile, on_delete=models.CASCADE) follower = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def", "= models.DateTimeField(default=timezone.now) def __str__(self): return self.text class Notify(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post", "Serif' : 'serif' } theme_mapping = { 'Default' : '', 'Dark Mode' :", ": 'glue' } grade_mapping = { 'blogger' : 'blogger-gray', 'contributor' : 'contributor-green', 'supporter'", "return result def parsedown(text): data = {'md': text.encode('utf-8')} res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return", ": 'sponsor-ff69b4', 'partner' : 'partner-blueviolet', 'master' : 'master-purple' } def randstr(length): rstr =", "= models.IntegerField(default=0) view_cnt = models.IntegerField(default=0) hide = models.BooleanField(default=False) notice = models.BooleanField(default=False) block_comment =", "total_likes(self): return self.likes.count() def save(self, *args, **kwargs): try: this = Post.objects.get(id=self.id) if this.image", "models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE) text = models.TextField(max_length=300) edit", "models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey(Post, on_delete=models.CASCADE) is_read = models.BooleanField(default=False) infomation = models.TextField() created_date", "name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Theme(models.Model): color = models.CharField(max_length=30,", "\"\"\" class History(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE)", "Meta: db_table = 'board_post_likes' auto_created = True post = models.ForeignKey(Post, on_delete=models.CASCADE) user =", "= models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.text class Notify(models.Model): user =", "on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Comment(models.Model): author = models.ForeignKey('auth.User',", "'' for i in range(length): result += rstr[random.randint(0, rstr_len)] return result def parsedown(text):", "is_apply = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class MiddleComment(models.Model): pass \"\"\" class", "on_delete=models.CASCADE) title = models.CharField(max_length=50) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) image = models.ImageField(blank=True, upload_to=title_image_path)", "Post.objects.get(id=self.id) if this.image != self.image: this.image.delete(save=False) except: pass super(Post, self).save(*args, **kwargs) class PostLikes(models.Model):", "= models.CharField(max_length=30, unique=True) def 
__str__(self): return self.color class Config(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE)", "__str__(self): return self.text class Notify(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey(Post, on_delete=models.CASCADE)", "= models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Font(models.Model): name = models.CharField(max_length=30, unique=True)", "def __str__(self): return self.user.username def total_subscriber(self): return self.subscriber.count() def save(self, *args, **kwargs): try:", "block_comment = models.BooleanField(default=False) likes = models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True) tag = TagField() created_date", "= models.BooleanField(default=False) notice = models.BooleanField(default=False) block_comment = models.BooleanField(default=False) likes = models.ManyToManyField(User, through='PostLikes', related_name='likes',", "Post(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) title = models.CharField(max_length=50) url = models.SlugField(max_length=50, unique=True, allow_unicode=True)", "auto_created = True post = models.ForeignKey(Post, on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE) created_date =", "class Series(models.Model): owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) name = models.CharField(max_length=50, unique=True) url = models.SlugField(max_length=50,", ": 'blogger-gray', 'contributor' : 'contributor-green', 'supporter' : 'supporter-orange', 'sponsor' : 'sponsor-ff69b4', 'partner' :", "Notify(user=user, post=post, infomation=infomation) new_notify.save() \"\"\" class Team(models.Model): name = models.CharField(max_length=15, unique=True) owner =", "models.ForeignKey('auth.User', on_delete=models.CASCADE) title = models.CharField(max_length=50) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) image = models.ImageField(blank=True,", "models.ImageField(blank=True, upload_to=title_image_path) text_md = models.TextField() text_html = models.TextField() trendy = models.IntegerField(default=0) view_cnt =", "def randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr) - 1 result = ''", "def save(self, *args, **kwargs): try: this = Post.objects.get(id=self.id) if this.image != self.image: this.image.delete(save=False)", "requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return res.text def avatar_path(instance, filename): dt = datetime.datetime.now() return 'avatar/u/'+instance.user.username+ '/'", "this = Post.objects.get(id=self.id) if this.image != self.image: this.image.delete(save=False) except: pass super(Post, self).save(*args, **kwargs)", "return self.title def get_absolute_url(self): return reverse('post_detail', args=[self.author, self.url]) def total_likes(self): return self.likes.count() def", "notice = models.BooleanField(default=False) block_comment = models.BooleanField(default=False) likes = models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True) tag", "__str__(self): return self.user.username class Grade(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name", "models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) comment = models.ForeignKey('board.Comment', 
related_name='request', on_delete", "Blue' : 'glue' } grade_mapping = { 'blogger' : 'blogger-gray', 'contributor' : 'contributor-green',", "from django.utils import timezone from tagging.fields import TagField font_mapping = { 'Noto Sans'", "author = models.ForeignKey('auth.User', on_delete=models.CASCADE) title = models.CharField(max_length=50) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) image", "def __str__(self): return self.user.username class Grade(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return", "'Noto Sans' : 'noto', 'RIDIBatang' : 'ridi', 'Noto Sans Serif' : 'serif' }", "'/' + randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance, filename): dt = datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) +", "return reverse('post_detail', args=[self.author, self.url]) def total_likes(self): return self.likes.count() def save(self, *args, **kwargs): try:", "models.TextField() created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.infomation class Series(models.Model): owner = models.ForeignKey('auth.User',", "= models.CharField(max_length=15, blank=True) twitter = models.CharField(max_length=15, blank=True) youtube = models.CharField(max_length=30, blank=True) facebook =", "models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Post(models.Model): author =", "models.TextField() trendy = models.IntegerField(default=0) view_cnt = models.IntegerField(default=0) hide = models.BooleanField(default=False) notice = models.BooleanField(default=False)", "import render_to_string from django.urls import reverse from django.utils import timezone from tagging.fields import", "post = models.ForeignKey('board.Post', on_delete = models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.user.username", "db_table = 'board_user_follow' auto_created = True following = models.ForeignKey(Profile, on_delete=models.CASCADE) follower = models.ForeignKey(User,", "{ 'blogger' : 'blogger-gray', 'contributor' : 'contributor-green', 'supporter' : 'supporter-orange', 'sponsor' : 'sponsor-ff69b4',", "models.DateTimeField(default=timezone.now) def __str__(self): return self.user.username class Grade(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self):", "= models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model): pass class TeamCategory(models.Model): pass \"\"\" \"\"\" class Request(models.Model):", "homepage = models.CharField(max_length=100, blank=True) def __str__(self): return self.user.username def total_subscriber(self): return self.subscriber.count() def", "= models.TextField() text_html = models.TextField() trendy = models.IntegerField(default=0) view_cnt = models.IntegerField(default=0) hide =", "follower = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Post(models.Model):", "models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class MiddleComment(models.Model): pass \"\"\" class History(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE)", "MiddleComment(models.Model): pass \"\"\" class History(models.Model): user = models.ForeignKey('auth.User', 
on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete", "twitter = models.CharField(max_length=15, blank=True) youtube = models.CharField(max_length=30, blank=True) facebook = models.CharField(max_length=30, blank=True) instagram", "randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance, filename): dt = datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def", "models.DateTimeField(default=timezone.now) def __str__(self): return self.title def get_absolute_url(self): return reverse('post_detail', args=[self.author, self.url]) def total_likes(self):", "= models.BooleanField(default=False) block_comment = models.BooleanField(default=False) likes = models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True) tag =", "instagram = models.CharField(max_length=15, blank=True) homepage = models.CharField(max_length=100, blank=True) def __str__(self): return self.user.username def", "post, infomation): new_notify = Notify(user=user, post=post, infomation=infomation) new_notify.save() \"\"\" class Team(models.Model): name =", "models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Comment(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) post =", "models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Font(models.Model): name = models.CharField(max_length=30, unique=True) def", "github = models.CharField(max_length=15, blank=True) twitter = models.CharField(max_length=15, blank=True) youtube = models.CharField(max_length=30, blank=True) facebook", ": 'dark', 'Violet' : 'purple', 'Green & Blue' : 'glue' } grade_mapping =", "return 'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance, filename): dt = datetime.datetime.now() return", "infomation): new_notify = Notify(user=user, post=post, infomation=infomation) new_notify.save() \"\"\" class Team(models.Model): name = models.CharField(max_length=15,", "except: pass super(Post, self).save(*args, **kwargs) class PostLikes(models.Model): class Meta: db_table = 'board_post_likes' auto_created", "from django.urls import reverse from django.utils import timezone from tagging.fields import TagField font_mapping", "= models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True) def __str__(self): return self.user.username class Profile(models.Model): user =", "created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.text class Notify(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE)", "theme_mapping = { 'Default' : '', 'Dark Mode' : 'dark', 'Violet' : 'purple',", "models.CharField(max_length=15, blank=True) youtube = models.CharField(max_length=30, blank=True) facebook = models.CharField(max_length=30, blank=True) instagram = models.CharField(max_length=15,", "'noto', 'RIDIBatang' : 'ridi', 'Noto Sans Serif' : 'serif' } theme_mapping = {", "= models.ForeignKey(Post, on_delete=models.CASCADE) is_read = models.BooleanField(default=False) infomation = models.TextField() created_date = models.DateTimeField(default=timezone.now) def", "blank=True) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.name def get_absolute_url(self): return reverse('series_list', args=[self.owner,", "unique=True, allow_unicode=True) 
image = models.ImageField(blank=True, upload_to=title_image_path) text_md = models.TextField() text_html = models.TextField() trendy", "related_name='request', on_delete = models.CASCADE) content = models.TextField(blank=True) is_apply = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now)", "new_notify = Notify(user=user, post=post, infomation=infomation) new_notify.save() \"\"\" class Team(models.Model): name = models.CharField(max_length=15, unique=True)", ": 'noto', 'RIDIBatang' : 'ridi', 'Noto Sans Serif' : 'serif' } theme_mapping =", "__str__(self): return self.infomation class Series(models.Model): owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) name = models.CharField(max_length=50, unique=True)", "return self.post.title class Post(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) title = models.CharField(max_length=50) url =", "models.BooleanField(default=False) agree_history = models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True) post_theme = models.ForeignKey('board.Theme',", "upload_to=team_logo_path) class TeamPost(models.Model): pass class TeamCategory(models.Model): pass \"\"\" \"\"\" class Request(models.Model): user =", "'serif' } theme_mapping = { 'Default' : '', 'Dark Mode' : 'dark', 'Violet'", "created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.user.username class Grade(models.Model): name = models.CharField(max_length=30, unique=True)", "bio = models.TextField(max_length=500, blank=True) about = models.TextField(blank=True) avatar = models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model):", "text = models.TextField(max_length=300) edit = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.text", "import User from django.template.loader import render_to_string from django.urls import reverse from django.utils import", "save(self, *args, **kwargs): try: this = Profile.objects.get(id=self.id) if this.avatar != self.avatar: this.avatar.delete(save=False) except:", "'Violet' : 'purple', 'Green & Blue' : 'glue' } grade_mapping = { 'blogger'", "member = models.ManyToManyField(User, related_name='members', blank=True) bio = models.TextField(max_length=500, blank=True) about = models.TextField(blank=True) avatar", "view_cnt = models.IntegerField(default=0) hide = models.BooleanField(default=False) notice = models.BooleanField(default=False) block_comment = models.BooleanField(default=False) likes", "models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True) tag = TagField() created_date = models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now)", "post = models.ForeignKey(Post, on_delete=models.CASCADE) is_read = models.BooleanField(default=False) infomation = models.TextField() created_date = models.DateTimeField(default=timezone.now)", "trendy = models.IntegerField(default=0) view_cnt = models.IntegerField(default=0) hide = models.BooleanField(default=False) notice = models.BooleanField(default=False) block_comment", "on_delete = models.CASCADE) text = models.TextField(max_length=300) edit = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) def", "rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr) - 1 result = '' for i", 
"self.color class Config(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) agree_email = models.BooleanField(default=False) agree_history = models.BooleanField(default=False)", "= { 'Default' : '', 'Dark Mode' : 'dark', 'Violet' : 'purple', 'Green", "hide = models.BooleanField(default=False) notice = models.BooleanField(default=False) block_comment = models.BooleanField(default=False) likes = models.ManyToManyField(User, through='PostLikes',", "on_delete=models.CASCADE, blank=True, null=True) def __str__(self): return self.user.username class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE)", "models.BooleanField(default=False) likes = models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True) tag = TagField() created_date = models.DateTimeField(default=timezone.now)", "models.CharField(max_length=30, blank=True) instagram = models.CharField(max_length=15, blank=True) homepage = models.CharField(max_length=100, blank=True) def __str__(self): return", "models.ForeignKey(Post, on_delete=models.CASCADE) is_read = models.BooleanField(default=False) infomation = models.TextField() created_date = models.DateTimeField(default=timezone.now) def __str__(self):", "on_delete=models.CASCADE, blank=True, null=True) post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True) def __str__(self): return self.user.username", "= models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class MiddleComment(models.Model): pass \"\"\" class History(models.Model): user = models.ForeignKey('auth.User',", "pass super(Post, self).save(*args, **kwargs) class PostLikes(models.Model): class Meta: db_table = 'board_post_likes' auto_created =", "TagField font_mapping = { 'Noto Sans' : 'noto', 'RIDIBatang' : 'ridi', 'Noto Sans", "= models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15, blank=True) twitter = models.CharField(max_length=15, blank=True) youtube = models.CharField(max_length=30,", "'supporter' : 'supporter-orange', 'sponsor' : 'sponsor-ff69b4', 'partner' : 'partner-blueviolet', 'master' : 'master-purple' }", "try: this = Post.objects.get(id=self.id) if this.image != self.image: this.image.delete(save=False) except: pass super(Post, self).save(*args,", "result def parsedown(text): data = {'md': text.encode('utf-8')} res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return res.text", "\"\"\" class MiddleComment(models.Model): pass \"\"\" class History(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post =", ": 'contributor-green', 'supporter' : 'supporter-orange', 'sponsor' : 'sponsor-ff69b4', 'partner' : 'partner-blueviolet', 'master' :", "blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15, blank=True) twitter = models.CharField(max_length=15, blank=True) youtube", "models.IntegerField(default=0) bio = models.TextField(max_length=500, blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15, blank=True) twitter", "= models.DateTimeField(default=timezone.now) def __str__(self): return self.infomation class Series(models.Model): owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) name", "agree_email = models.BooleanField(default=False) agree_history = models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, 
blank=True, null=True) post_theme", "Mode' : 'dark', 'Violet' : 'purple', 'Green & Blue' : 'glue' } grade_mapping", "models.IntegerField(default=0) hide = models.BooleanField(default=False) notice = models.BooleanField(default=False) block_comment = models.BooleanField(default=False) likes = models.ManyToManyField(User,", "on_delete=models.CASCADE, blank=True, null=True) exp = models.IntegerField(default=0) bio = models.TextField(max_length=500, blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path)", "class Comment(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE)", "models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Comment(models.Model): author =", "= models.BooleanField(default=False) likes = models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True) tag = TagField() created_date =", "models.TextField(blank=True) is_apply = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class MiddleComment(models.Model): pass \"\"\"", "return self.user.username class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber',", "= models.TextField() trendy = models.IntegerField(default=0) view_cnt = models.IntegerField(default=0) hide = models.BooleanField(default=False) notice =", "+ randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post, infomation): new_notify = Notify(user=user, post=post, infomation=infomation) new_notify.save() \"\"\"", "models.CASCADE) text = models.TextField(max_length=300) edit = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return", "if this.image != self.image: this.image.delete(save=False) except: pass super(Post, self).save(*args, **kwargs) class PostLikes(models.Model): class", "requests import datetime import random from django.db import models from django.contrib.auth.models import User", ": 'serif' } theme_mapping = { 'Default' : '', 'Dark Mode' : 'dark',", "timezone from tagging.fields import TagField font_mapping = { 'Noto Sans' : 'noto', 'RIDIBatang'", "on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return", "= models.IntegerField(default=0) hide = models.BooleanField(default=False) notice = models.BooleanField(default=False) block_comment = models.BooleanField(default=False) likes =", "'master' : 'master-purple' } def randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr) -", "following = models.ForeignKey(Profile, on_delete=models.CASCADE) follower = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self):", "created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.name def get_absolute_url(self): return reverse('series_list', args=[self.owner, self.url])", "def avatar_path(instance, filename): dt = datetime.datetime.now() return 'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1] def", "grade_mapping = { 'blogger' : 'blogger-gray', 'contributor' : 
'contributor-green', 'supporter' : 'supporter-orange', 'sponsor'", "class Follow(models.Model): class Meta: db_table = 'board_user_follow' auto_created = True following = models.ForeignKey(Profile,", "models.TextField(blank=True) avatar = models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model): pass class TeamCategory(models.Model): pass \"\"\" \"\"\"", "null=True) post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True) def __str__(self): return self.user.username class Profile(models.Model):", "self.subscriber.count() def save(self, *args, **kwargs): try: this = Profile.objects.get(id=self.id) if this.avatar != self.avatar:", "except: pass super(Profile, self).save(*args, **kwargs) class Follow(models.Model): class Meta: db_table = 'board_user_follow' auto_created", "self.likes.count() def save(self, *args, **kwargs): try: this = Post.objects.get(id=self.id) if this.image != self.image:", "Team(models.Model): name = models.CharField(max_length=15, unique=True) owner = models.ForeignKey('auth.User') member = models.ManyToManyField(User, related_name='members', blank=True)", "datetime import random from django.db import models from django.contrib.auth.models import User from django.template.loader", "def __str__(self): return self.user.username class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) subscriber = models.ManyToManyField(User,", "models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.title def get_absolute_url(self): return reverse('post_detail', args=[self.author,", "datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post, infomation): new_notify = Notify(user=user, post=post,", "models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Post(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) title =", "import reverse from django.utils import timezone from tagging.fields import TagField font_mapping = {", "author = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE) text =", "for i in range(length): result += rstr[random.randint(0, rstr_len)] return result def parsedown(text): data", ": 'supporter-orange', 'sponsor' : 'sponsor-ff69b4', 'partner' : 'partner-blueviolet', 'master' : 'master-purple' } def", "rstr_len)] return result def parsedown(text): data = {'md': text.encode('utf-8')} res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data)", "'sponsor' : 'sponsor-ff69b4', 'partner' : 'partner-blueviolet', 'master' : 'master-purple' } def randstr(length): rstr", "*args, **kwargs): try: this = Profile.objects.get(id=self.id) if this.avatar != self.avatar: this.avatar.delete(save=False) except: pass", "= models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Theme(models.Model): color = models.CharField(max_length=30, unique=True)", "post = models.ForeignKey('board.Post', on_delete = models.CASCADE) comment = models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE)", "class TeamCategory(models.Model): pass \"\"\" \"\"\" class Request(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) 
post =", "= models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) comment = models.ForeignKey('board.Comment', related_name='request',", "TagField() created_date = models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.title def get_absolute_url(self):", "__str__(self): return self.user.username class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) subscriber = models.ManyToManyField(User, through='Follow',", "rstr_len = len(rstr) - 1 result = '' for i in range(length): result", "= models.TextField(max_length=500, blank=True) about = models.TextField(blank=True) avatar = models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model): pass", "models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model): pass class TeamCategory(models.Model): pass \"\"\" \"\"\" class Request(models.Model): user", "on_delete=models.CASCADE) post = models.ForeignKey(Post, on_delete=models.CASCADE) is_read = models.BooleanField(default=False) infomation = models.TextField() created_date =", "= models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True) exp = models.IntegerField(default=0) bio = models.TextField(max_length=500, blank=True) avatar", "\"\"\" class Team(models.Model): name = models.CharField(max_length=15, unique=True) owner = models.ForeignKey('auth.User') member = models.ManyToManyField(User,", "= len(rstr) - 1 result = '' for i in range(length): result +=", "= models.CharField(max_length=50, unique=True) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) posts = models.ManyToManyField(Post, related_name='postlist', blank=True)", "Theme(models.Model): color = models.CharField(max_length=30, unique=True) def __str__(self): return self.color class Config(models.Model): user =", "blank=True, null=True) def __str__(self): return self.user.username class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) subscriber", "new_notify.save() \"\"\" class Team(models.Model): name = models.CharField(max_length=15, unique=True) owner = models.ForeignKey('auth.User') member =", "Meta: db_table = 'board_user_follow' auto_created = True following = models.ForeignKey(Profile, on_delete=models.CASCADE) follower =", "{ 'Default' : '', 'Dark Mode' : 'dark', 'Violet' : 'purple', 'Green &", "= models.OneToOneField(User, on_delete=models.CASCADE) agree_email = models.BooleanField(default=False) agree_history = models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE,", "models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.text class Notify(models.Model): user = models.ForeignKey('auth.User',", "text_md = models.TextField() text_html = models.TextField() trendy = models.IntegerField(default=0) view_cnt = models.IntegerField(default=0) hide", "__str__(self): return self.post.title class Comment(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', related_name='comments',", "reverse('post_detail', args=[self.author, self.url]) def total_likes(self): return self.likes.count() def save(self, *args, **kwargs): try: this", "models.ForeignKey(Post, on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE) created_date = 
models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title", "user = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Comment(models.Model):", ": 'master-purple' } def randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr) - 1", "avatar_path(instance, filename): dt = datetime.datetime.now() return 'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance,", "models.ForeignKey('auth.User', on_delete=models.CASCADE) name = models.CharField(max_length=50, unique=True) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) posts =", "class Request(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) comment", "on_delete=models.CASCADE) follower = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class", "= models.CharField(max_length=15, unique=True) owner = models.ForeignKey('auth.User') member = models.ManyToManyField(User, related_name='members', blank=True) bio =", "**kwargs) class PostLikes(models.Model): class Meta: db_table = 'board_post_likes' auto_created = True post =", "related_name='subscriber', blank=True) grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True) exp = models.IntegerField(default=0) bio =", "related_name='comments', on_delete = models.CASCADE) text = models.TextField(max_length=300) edit = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now)", "= models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE) text = models.TextField(max_length=300)", "on_delete=models.CASCADE) subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True) grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True)", "= models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True) tag = TagField() created_date = models.DateTimeField(default=timezone.now) updated_date =", "models.ForeignKey(Profile, on_delete=models.CASCADE) follower = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title", "models.CASCADE) content = models.TextField(blank=True) is_apply = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class", "= models.TextField(max_length=500, blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15, blank=True) twitter = models.CharField(max_length=15,", "models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.user.username class Grade(models.Model): name = models.CharField(max_length=30,", "on_delete=models.CASCADE) name = models.CharField(max_length=50, unique=True) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) posts = models.ManyToManyField(Post,", "blank=True, null=True) post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True) def __str__(self): return self.user.username 
class", "def __str__(self): return self.title def get_absolute_url(self): return reverse('post_detail', args=[self.author, self.url]) def total_likes(self): return", "Notify(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey(Post, on_delete=models.CASCADE) is_read = models.BooleanField(default=False) infomation", "= models.CASCADE) comment = models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE) content = models.TextField(blank=True) is_apply", "'board_post_likes' auto_created = True post = models.ForeignKey(Post, on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE) created_date", "on_delete=models.CASCADE) is_read = models.BooleanField(default=False) infomation = models.TextField() created_date = models.DateTimeField(default=timezone.now) def __str__(self): return", "+= rstr[random.randint(0, rstr_len)] return result def parsedown(text): data = {'md': text.encode('utf-8')} res =", "'contributor-green', 'supporter' : 'supporter-orange', 'sponsor' : 'sponsor-ff69b4', 'partner' : 'partner-blueviolet', 'master' : 'master-purple'", "Config(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) agree_email = models.BooleanField(default=False) agree_history = models.BooleanField(default=False) post_fonts =", "class Meta: db_table = 'board_user_follow' auto_created = True following = models.ForeignKey(Profile, on_delete=models.CASCADE) follower", "if this.avatar != self.avatar: this.avatar.delete(save=False) except: pass super(Profile, self).save(*args, **kwargs) class Follow(models.Model): class", "self.text class Notify(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey(Post, on_delete=models.CASCADE) is_read =", "user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey(Post, on_delete=models.CASCADE) is_read = models.BooleanField(default=False) infomation =", "def title_image_path(instance, filename): dt = datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post,", "rstr[random.randint(0, rstr_len)] return result def parsedown(text): data = {'md': text.encode('utf-8')} res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php',", "user = models.OneToOneField(User, on_delete=models.CASCADE) subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True) grade = models.ForeignKey('board.Grade',", "= models.IntegerField(default=0) bio = models.TextField(max_length=500, blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15, blank=True)", "randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post, infomation): new_notify = Notify(user=user, post=post, infomation=infomation) new_notify.save() \"\"\" class", "models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class MiddleComment(models.Model): pass \"\"\" class History(models.Model): user", "on_delete = models.CASCADE) comment = models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE) content = models.TextField(blank=True)", "= datetime.datetime.now() return 'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance, filename): dt =", "= 
models.ManyToManyField(Post, related_name='postlist', blank=True) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.name def get_absolute_url(self):", "= models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.title def get_absolute_url(self): return reverse('post_detail',", "= models.SlugField(max_length=50, unique=True, allow_unicode=True) image = models.ImageField(blank=True, upload_to=title_image_path) text_md = models.TextField() text_html =", "= TagField() created_date = models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.title def", "= models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Post(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) title", "pass \"\"\" \"\"\" class Request(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete", "return self.subscriber.count() def save(self, *args, **kwargs): try: this = Profile.objects.get(id=self.id) if this.avatar !=", "Request(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) comment =", "name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Font(models.Model): name = models.CharField(max_length=30,", "models.BooleanField(default=False) notice = models.BooleanField(default=False) block_comment = models.BooleanField(default=False) likes = models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True)", "created_date = models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.title def get_absolute_url(self): return", "= models.ForeignKey(Post, on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return", "__str__(self): return self.name class Theme(models.Model): color = models.CharField(max_length=30, unique=True) def __str__(self): return self.color", "class History(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) created_date", "'partner-blueviolet', 'master' : 'master-purple' } def randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr)", "def create_notify(user, post, infomation): new_notify = Notify(user=user, post=post, infomation=infomation) new_notify.save() \"\"\" class Team(models.Model):", "models.BooleanField(default=False) infomation = models.TextField() created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.infomation class Series(models.Model):", "models.DateTimeField(default=timezone.now) def __str__(self): return self.infomation class Series(models.Model): owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) name =", "blank=True) youtube = models.CharField(max_length=30, blank=True) facebook = models.CharField(max_length=30, blank=True) instagram = models.CharField(max_length=15, blank=True)", "= models.CASCADE) text = models.TextField(max_length=300) edit = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) def 
__str__(self):", "= models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey(Post, on_delete=models.CASCADE) is_read = models.BooleanField(default=False) infomation = models.TextField()", "owner = models.ForeignKey('auth.User') member = models.ManyToManyField(User, related_name='members', blank=True) bio = models.TextField(max_length=500, blank=True) about", "import requests import datetime import random from django.db import models from django.contrib.auth.models import", "'purple', 'Green & Blue' : 'glue' } grade_mapping = { 'blogger' : 'blogger-gray',", "name = models.CharField(max_length=15, unique=True) owner = models.ForeignKey('auth.User') member = models.ManyToManyField(User, related_name='members', blank=True) bio", "return self.user.username def total_subscriber(self): return self.subscriber.count() def save(self, *args, **kwargs): try: this =", "color = models.CharField(max_length=30, unique=True) def __str__(self): return self.color class Config(models.Model): user = models.OneToOneField(User,", "upload_to=title_image_path) text_md = models.TextField() text_html = models.TextField() trendy = models.IntegerField(default=0) view_cnt = models.IntegerField(default=0)", "= models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Comment(models.Model): author", "font_mapping = { 'Noto Sans' : 'noto', 'RIDIBatang' : 'ridi', 'Noto Sans Serif'", "dt = datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post, infomation): new_notify =", "= models.CharField(max_length=15, blank=True) homepage = models.CharField(max_length=100, blank=True) def __str__(self): return self.user.username def total_subscriber(self):", "django.utils import timezone from tagging.fields import TagField font_mapping = { 'Noto Sans' :", "return self.name class Font(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class", "= models.CASCADE) content = models.TextField(blank=True) is_apply = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) \"\"\" \"\"\"", "reverse from django.utils import timezone from tagging.fields import TagField font_mapping = { 'Noto", "models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True) grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True) exp = models.IntegerField(default=0)", "name = models.CharField(max_length=50, unique=True) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) posts = models.ManyToManyField(Post, related_name='postlist',", "import datetime import random from django.db import models from django.contrib.auth.models import User from", "User from django.template.loader import render_to_string from django.urls import reverse from django.utils import timezone", "= models.CharField(max_length=15, blank=True) youtube = models.CharField(max_length=30, blank=True) facebook = models.CharField(max_length=30, blank=True) instagram =", "'RIDIBatang' : 'ridi', 'Noto Sans Serif' : 'serif' } theme_mapping = { 'Default'", "post=post, infomation=infomation) new_notify.save() \"\"\" class Team(models.Model): name = models.CharField(max_length=15, unique=True) owner = models.ForeignKey('auth.User')", "class Config(models.Model): user = 
models.OneToOneField(User, on_delete=models.CASCADE) agree_email = models.BooleanField(default=False) agree_history = models.BooleanField(default=False) post_fonts", "blank=True, null=True) exp = models.IntegerField(default=0) bio = models.TextField(max_length=500, blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path) github", "Profile.objects.get(id=self.id) if this.avatar != self.avatar: this.avatar.delete(save=False) except: pass super(Profile, self).save(*args, **kwargs) class Follow(models.Model):", "= models.ImageField(blank=True, upload_to=title_image_path) text_md = models.TextField() text_html = models.TextField() trendy = models.IntegerField(default=0) view_cnt", "= models.OneToOneField(User, on_delete=models.CASCADE) subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True) grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE,", "models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE) content = models.TextField(blank=True) is_apply = models.BooleanField(default=False) created_date =", "= models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True) post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True,", "def parsedown(text): data = {'md': text.encode('utf-8')} res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return res.text def", "{'md': text.encode('utf-8')} res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return res.text def avatar_path(instance, filename): dt =", "return res.text def avatar_path(instance, filename): dt = datetime.datetime.now() return 'avatar/u/'+instance.user.username+ '/' + randstr(4)", "self).save(*args, **kwargs) class PostLikes(models.Model): class Meta: db_table = 'board_post_likes' auto_created = True post", "self.infomation class Series(models.Model): owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) name = models.CharField(max_length=50, unique=True) url =", "= models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Comment(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) post", "Sans Serif' : 'serif' } theme_mapping = { 'Default' : '', 'Dark Mode'", "__str__(self): return self.title def get_absolute_url(self): return reverse('post_detail', args=[self.author, self.url]) def total_likes(self): return self.likes.count()", "return self.name class Theme(models.Model): color = models.CharField(max_length=30, unique=True) def __str__(self): return self.color class", "db_table = 'board_post_likes' auto_created = True post = models.ForeignKey(Post, on_delete=models.CASCADE) user = models.ForeignKey(User,", "*args, **kwargs): try: this = Post.objects.get(id=self.id) if this.image != self.image: this.image.delete(save=False) except: pass", "filename): dt = datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post, infomation): new_notify", "this = Profile.objects.get(id=self.id) if this.avatar != self.avatar: this.avatar.delete(save=False) except: pass super(Profile, self).save(*args, **kwargs)", "allow_unicode=True) image = models.ImageField(blank=True, upload_to=title_image_path) text_md = models.TextField() text_html = models.TextField() trendy =", "= 
models.ForeignKey('board.Post', on_delete = models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.user.username class", "1 result = '' for i in range(length): result += rstr[random.randint(0, rstr_len)] return", "self.url]) def total_likes(self): return self.likes.count() def save(self, *args, **kwargs): try: this = Post.objects.get(id=self.id)", "'Green & Blue' : 'glue' } grade_mapping = { 'blogger' : 'blogger-gray', 'contributor'", "models.TextField() text_html = models.TextField() trendy = models.IntegerField(default=0) view_cnt = models.IntegerField(default=0) hide = models.BooleanField(default=False)", "avatar = models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15, blank=True) twitter = models.CharField(max_length=15, blank=True) youtube =", "= 'board_user_follow' auto_created = True following = models.ForeignKey(Profile, on_delete=models.CASCADE) follower = models.ForeignKey(User, on_delete=models.CASCADE)", "class Post(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) title = models.CharField(max_length=50) url = models.SlugField(max_length=50, unique=True,", "url = models.SlugField(max_length=50, unique=True, allow_unicode=True) image = models.ImageField(blank=True, upload_to=title_image_path) text_md = models.TextField() text_html", "def __str__(self): return self.post.title class Post(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) title = models.CharField(max_length=50)", "super(Post, self).save(*args, **kwargs) class PostLikes(models.Model): class Meta: db_table = 'board_post_likes' auto_created = True", "= models.TextField(blank=True) is_apply = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class MiddleComment(models.Model): pass", "= requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return res.text def avatar_path(instance, filename): dt = datetime.datetime.now() return 'avatar/u/'+instance.user.username+", "post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True) post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True) def", "'0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr) - 1 result = '' for i in range(length):", "models.CharField(max_length=50, unique=True) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) posts = models.ManyToManyField(Post, related_name='postlist', blank=True) created_date", "edit = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.text class Notify(models.Model): user", "unique=True) def __str__(self): return self.name class Theme(models.Model): color = models.CharField(max_length=30, unique=True) def __str__(self):", "models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15, blank=True) twitter = models.CharField(max_length=15, blank=True) youtube = models.CharField(max_length=30, blank=True)", "models from django.contrib.auth.models import User from django.template.loader import render_to_string from django.urls import reverse", "dt = datetime.datetime.now() return 'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance, filename): dt", "res.text def avatar_path(instance, filename): dt = datetime.datetime.now() return 
'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1]", "= datetime.datetime.now() return 'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post, infomation): new_notify = Notify(user=user,", ": '', 'Dark Mode' : 'dark', 'Violet' : 'purple', 'Green & Blue' :", ": 'ridi', 'Noto Sans Serif' : 'serif' } theme_mapping = { 'Default' :", "'partner' : 'partner-blueviolet', 'master' : 'master-purple' } def randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len", "django.template.loader import render_to_string from django.urls import reverse from django.utils import timezone from tagging.fields", "= models.BooleanField(default=False) infomation = models.TextField() created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.infomation class", "super(Profile, self).save(*args, **kwargs) class Follow(models.Model): class Meta: db_table = 'board_user_follow' auto_created = True", "True post = models.ForeignKey(Post, on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def", "'Noto Sans Serif' : 'serif' } theme_mapping = { 'Default' : '', 'Dark", "blank=True) about = models.TextField(blank=True) avatar = models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model): pass class TeamCategory(models.Model):", "models.TextField(max_length=500, blank=True) about = models.TextField(blank=True) avatar = models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model): pass class", "try: this = Profile.objects.get(id=self.id) if this.avatar != self.avatar: this.avatar.delete(save=False) except: pass super(Profile, self).save(*args,", "self.user.username class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True)", "= Profile.objects.get(id=self.id) if this.avatar != self.avatar: this.avatar.delete(save=False) except: pass super(Profile, self).save(*args, **kwargs) class", "get_absolute_url(self): return reverse('post_detail', args=[self.author, self.url]) def total_likes(self): return self.likes.count() def save(self, *args, **kwargs):", "content = models.TextField(blank=True) is_apply = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) \"\"\" \"\"\" class MiddleComment(models.Model):", "= Post.objects.get(id=self.id) if this.image != self.image: this.image.delete(save=False) except: pass super(Post, self).save(*args, **kwargs) class", "on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class", "= models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.user.username class Grade(models.Model): name =", "on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) comment = models.ForeignKey('board.Comment', related_name='request', on_delete =", "History(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) created_date =", "likes = models.ManyToManyField(User, through='PostLikes', related_name='likes', 
blank=True) tag = TagField() created_date = models.DateTimeField(default=timezone.now) updated_date", "class MiddleComment(models.Model): pass \"\"\" class History(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post',", "posts = models.ManyToManyField(Post, related_name='postlist', blank=True) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.name def", "created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Post(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE)", "'title/'+instance.author.username+'/'+str(dt.year)+'/'+str(dt.month)+'/'+str(dt.day)+'/'+str(dt.hour) + randstr(4)+'.'+filename.split('.')[-1] def create_notify(user, post, infomation): new_notify = Notify(user=user, post=post, infomation=infomation) new_notify.save()", "is_read = models.BooleanField(default=False) infomation = models.TextField() created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.infomation", "pass \"\"\" class History(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete =", "def __str__(self): return self.name class Theme(models.Model): color = models.CharField(max_length=30, unique=True) def __str__(self): return", "post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True) def __str__(self): return self.user.username class Profile(models.Model): user", "Font(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Theme(models.Model): color =", "__str__(self): return self.post.title class Post(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) title = models.CharField(max_length=50) url", "on_delete = models.CASCADE) content = models.TextField(blank=True) is_apply = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) \"\"\"", "'master-purple' } def randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr) - 1 result", "import random from django.db import models from django.contrib.auth.models import User from django.template.loader import", "= '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr) - 1 result = '' for i in", "create_notify(user, post, infomation): new_notify = Notify(user=user, post=post, infomation=infomation) new_notify.save() \"\"\" class Team(models.Model): name", "models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Theme(models.Model): color = models.CharField(max_length=30, unique=True) def", "models.ForeignKey('board.Post', on_delete = models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.user.username class Grade(models.Model):", "models.IntegerField(default=0) view_cnt = models.IntegerField(default=0) hide = models.BooleanField(default=False) notice = models.BooleanField(default=False) block_comment = models.BooleanField(default=False)", "& Blue' : 'glue' } grade_mapping = { 'blogger' : 'blogger-gray', 'contributor' :", "**kwargs): try: this = Profile.objects.get(id=self.id) if this.avatar != self.avatar: this.avatar.delete(save=False) except: pass super(Profile,", "data = {'md': text.encode('utf-8')} res = 
requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return res.text def avatar_path(instance, filename):", "class Theme(models.Model): color = models.CharField(max_length=30, unique=True) def __str__(self): return self.color class Config(models.Model): user", "\"\"\" \"\"\" class MiddleComment(models.Model): pass \"\"\" class History(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post", "= models.TextField(max_length=300) edit = models.BooleanField(default=False) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.text class", "image = models.ImageField(blank=True, upload_to=title_image_path) text_md = models.TextField() text_html = models.TextField() trendy = models.IntegerField(default=0)", "def total_subscriber(self): return self.subscriber.count() def save(self, *args, **kwargs): try: this = Profile.objects.get(id=self.id) if", "unique=True) def __str__(self): return self.color class Config(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) agree_email =", "= models.CharField(max_length=30, blank=True) facebook = models.CharField(max_length=30, blank=True) instagram = models.CharField(max_length=15, blank=True) homepage =", "= models.ForeignKey('board.Post', on_delete = models.CASCADE) comment = models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE) content", "= models.CharField(max_length=50) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) image = models.ImageField(blank=True, upload_to=title_image_path) text_md =", "} def randstr(length): rstr = '0123456789abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLNMOPQRSTUVWXYZ' rstr_len = len(rstr) - 1 result =", "__str__(self): return self.user.username def total_subscriber(self): return self.subscriber.count() def save(self, *args, **kwargs): try: this", "self.user.username class Grade(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Font(models.Model):", "models.SlugField(max_length=50, unique=True, allow_unicode=True) image = models.ImageField(blank=True, upload_to=title_image_path) text_md = models.TextField() text_html = models.TextField()", "blank=True) def __str__(self): return self.user.username def total_subscriber(self): return self.subscriber.count() def save(self, *args, **kwargs):", "models.BooleanField(default=False) post_fonts = models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True) post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True)", "blank=True) twitter = models.CharField(max_length=15, blank=True) youtube = models.CharField(max_length=30, blank=True) facebook = models.CharField(max_length=30, blank=True)", "= models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True) post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True) def __str__(self):", "def __str__(self): return self.post.title class Comment(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post',", "__str__(self): return self.color class Config(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) agree_email = models.BooleanField(default=False) agree_history", "youtube = models.CharField(max_length=30, blank=True) facebook = models.CharField(max_length=30, blank=True) instagram = models.CharField(max_length=15, blank=True) homepage", 
"models.ManyToManyField(Post, related_name='postlist', blank=True) created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.name def get_absolute_url(self): return", "self.image: this.image.delete(save=False) except: pass super(Post, self).save(*args, **kwargs) class PostLikes(models.Model): class Meta: db_table =", "created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.post.title class Comment(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE)", "this.avatar.delete(save=False) except: pass super(Profile, self).save(*args, **kwargs) class Follow(models.Model): class Meta: db_table = 'board_user_follow'", "models.BooleanField(default=False) block_comment = models.BooleanField(default=False) likes = models.ManyToManyField(User, through='PostLikes', related_name='likes', blank=True) tag = TagField()", "= '' for i in range(length): result += rstr[random.randint(0, rstr_len)] return result def", "unique=True) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) posts = models.ManyToManyField(Post, related_name='postlist', blank=True) created_date =", "__str__(self): return self.name class Font(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name", "= models.DateTimeField(default=timezone.now) def __str__(self): return self.user.username class Grade(models.Model): name = models.CharField(max_length=30, unique=True) def", "**kwargs): try: this = Post.objects.get(id=self.id) if this.image != self.image: this.image.delete(save=False) except: pass super(Post,", "post = models.ForeignKey(Post, on_delete=models.CASCADE) user = models.ForeignKey(User, on_delete=models.CASCADE) created_date = models.DateTimeField(default=timezone.now) def __str__(self):", "from tagging.fields import TagField font_mapping = { 'Noto Sans' : 'noto', 'RIDIBatang' :", "self.name class Font(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Theme(models.Model):", "models.ForeignKey('board.Post', on_delete = models.CASCADE) comment = models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE) content =", "filename): dt = datetime.datetime.now() return 'avatar/u/'+instance.user.username+ '/' + randstr(4) +'.'+filename.split('.')[-1] def title_image_path(instance, filename):", "this.image != self.image: this.image.delete(save=False) except: pass super(Post, self).save(*args, **kwargs) class PostLikes(models.Model): class Meta:", ": 'purple', 'Green & Blue' : 'glue' } grade_mapping = { 'blogger' :", "user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', on_delete = models.CASCADE) created_date = models.DateTimeField(default=timezone.now)", "comment = models.ForeignKey('board.Comment', related_name='request', on_delete = models.CASCADE) content = models.TextField(blank=True) is_apply = models.BooleanField(default=False)", "'supporter-orange', 'sponsor' : 'sponsor-ff69b4', 'partner' : 'partner-blueviolet', 'master' : 'master-purple' } def randstr(length):", "facebook = models.CharField(max_length=30, blank=True) instagram = models.CharField(max_length=15, blank=True) homepage = models.CharField(max_length=100, blank=True) def", "= models.TextField() created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.infomation class Series(models.Model): owner =", "= { 'blogger' : 'blogger-gray', 'contributor' : 
'contributor-green', 'supporter' : 'supporter-orange', 'sponsor' :", "blank=True) instagram = models.CharField(max_length=15, blank=True) homepage = models.CharField(max_length=100, blank=True) def __str__(self): return self.user.username", "= models.TextField(blank=True) avatar = models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model): pass class TeamCategory(models.Model): pass \"\"\"", "len(rstr) - 1 result = '' for i in range(length): result += rstr[random.randint(0,", "i in range(length): result += rstr[random.randint(0, rstr_len)] return result def parsedown(text): data =", "null=True) exp = models.IntegerField(default=0) bio = models.TextField(max_length=500, blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path) github =", "= { 'Noto Sans' : 'noto', 'RIDIBatang' : 'ridi', 'Noto Sans Serif' :", "Sans' : 'noto', 'RIDIBatang' : 'ridi', 'Noto Sans Serif' : 'serif' } theme_mapping", "Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True) grade =", "models.ForeignKey('board.Font', on_delete=models.CASCADE, blank=True, null=True) post_theme = models.ForeignKey('board.Theme', on_delete=models.CASCADE, blank=True, null=True) def __str__(self): return", "auto_created = True following = models.ForeignKey(Profile, on_delete=models.CASCADE) follower = models.ForeignKey(User, on_delete=models.CASCADE) created_date =", "= {'md': text.encode('utf-8')} res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return res.text def avatar_path(instance, filename): dt", "blank=True) facebook = models.CharField(max_length=30, blank=True) instagram = models.CharField(max_length=15, blank=True) homepage = models.CharField(max_length=100, blank=True)", "class Meta: db_table = 'board_post_likes' auto_created = True post = models.ForeignKey(Post, on_delete=models.CASCADE) user", "models.DateTimeField(default=timezone.now) def __str__(self): return self.text class Notify(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post =", "!= self.avatar: this.avatar.delete(save=False) except: pass super(Profile, self).save(*args, **kwargs) class Follow(models.Model): class Meta: db_table", "= models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE) text = models.TextField(max_length=300) edit = models.BooleanField(default=False) created_date", "'', 'Dark Mode' : 'dark', 'Violet' : 'purple', 'Green & Blue' : 'glue'", "about = models.TextField(blank=True) avatar = models.ImageField(blank=True, upload_to=team_logo_path) class TeamPost(models.Model): pass class TeamCategory(models.Model): pass", "url = models.SlugField(max_length=50, unique=True, allow_unicode=True) posts = models.ManyToManyField(Post, related_name='postlist', blank=True) created_date = models.DateTimeField(default=timezone.now)", "def __str__(self): return self.infomation class Series(models.Model): owner = models.ForeignKey('auth.User', on_delete=models.CASCADE) name = models.CharField(max_length=50,", "def __str__(self): return self.color class Config(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) agree_email = models.BooleanField(default=False)", "args=[self.author, self.url]) def total_likes(self): return self.likes.count() def save(self, *args, **kwargs): try: this =", "'glue' } grade_mapping = { 'blogger' : 'blogger-gray', 'contributor' : 'contributor-green', 
'supporter' :", "models.ForeignKey('board.Post', related_name='comments', on_delete = models.CASCADE) text = models.TextField(max_length=300) edit = models.BooleanField(default=False) created_date =", "blank=True) tag = TagField() created_date = models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now) def __str__(self): return", "models.CharField(max_length=50) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) image = models.ImageField(blank=True, upload_to=title_image_path) text_md = models.TextField()", "'contributor' : 'contributor-green', 'supporter' : 'supporter-orange', 'sponsor' : 'sponsor-ff69b4', 'partner' : 'partner-blueviolet', 'master'", "updated_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.title def get_absolute_url(self): return reverse('post_detail', args=[self.author, self.url])", "return self.color class Config(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) agree_email = models.BooleanField(default=False) agree_history =", "'dark', 'Violet' : 'purple', 'Green & Blue' : 'glue' } grade_mapping = {", "= models.ManyToManyField(User, related_name='members', blank=True) bio = models.TextField(max_length=500, blank=True) about = models.TextField(blank=True) avatar =", "tag = TagField() created_date = models.DateTimeField(default=timezone.now) updated_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.title", "models.ForeignKey('auth.User') member = models.ManyToManyField(User, related_name='members', blank=True) bio = models.TextField(max_length=500, blank=True) about = models.TextField(blank=True)", "return self.post.title class Comment(models.Model): author = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post', related_name='comments', on_delete", "= models.ForeignKey('auth.User') member = models.ManyToManyField(User, related_name='members', blank=True) bio = models.TextField(max_length=500, blank=True) about =", "Grade(models.Model): name = models.CharField(max_length=30, unique=True) def __str__(self): return self.name class Font(models.Model): name =", "bio = models.TextField(max_length=500, blank=True) avatar = models.ImageField(blank=True,upload_to=avatar_path) github = models.CharField(max_length=15, blank=True) twitter =", "unique=True) owner = models.ForeignKey('auth.User') member = models.ManyToManyField(User, related_name='members', blank=True) bio = models.TextField(max_length=500, blank=True)", "result = '' for i in range(length): result += rstr[random.randint(0, rstr_len)] return result", "blank=True) homepage = models.CharField(max_length=100, blank=True) def __str__(self): return self.user.username def total_subscriber(self): return self.subscriber.count()", "save(self, *args, **kwargs): try: this = Post.objects.get(id=self.id) if this.image != self.image: this.image.delete(save=False) except:", "title = models.CharField(max_length=50) url = models.SlugField(max_length=50, unique=True, allow_unicode=True) image = models.ImageField(blank=True, upload_to=title_image_path) text_md", "def get_absolute_url(self): return reverse('post_detail', args=[self.author, self.url]) def total_likes(self): return self.likes.count() def save(self, *args,", "class PostLikes(models.Model): class Meta: db_table = 'board_post_likes' auto_created = True post = models.ForeignKey(Post,", "text.encode('utf-8')} res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', 
data=data) return res.text def avatar_path(instance, filename): dt = datetime.datetime.now()", "= models.CharField(max_length=30, blank=True) instagram = models.CharField(max_length=15, blank=True) homepage = models.CharField(max_length=100, blank=True) def __str__(self):", "= models.DateTimeField(default=timezone.now) def __str__(self): return self.title def get_absolute_url(self): return reverse('post_detail', args=[self.author, self.url]) def", "PostLikes(models.Model): class Meta: db_table = 'board_post_likes' auto_created = True post = models.ForeignKey(Post, on_delete=models.CASCADE)", "parsedown(text): data = {'md': text.encode('utf-8')} res = requests.post('http://baealex.dothome.co.kr/api/parsedown/get.php', data=data) return res.text def avatar_path(instance,", "TeamCategory(models.Model): pass \"\"\" \"\"\" class Request(models.Model): user = models.ForeignKey('auth.User', on_delete=models.CASCADE) post = models.ForeignKey('board.Post',", "subscriber = models.ManyToManyField(User, through='Follow', related_name='subscriber', blank=True) grade = models.ForeignKey('board.Grade', on_delete=models.CASCADE, blank=True, null=True) exp", "created_date = models.DateTimeField(default=timezone.now) def __str__(self): return self.infomation class Series(models.Model): owner = models.ForeignKey('auth.User', on_delete=models.CASCADE)" ]
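To show how these pieces compose, here is a minimal sketch of a publishing helper. It is not part of the original module: publish_post is a hypothetical name, and it assumes the models above live in an installed, migrated app named 'board', that the author already has a Profile row, and that TagField tolerates being left empty.

# Hypothetical helper (not in the original module), assuming the 'board'
# app is installed and migrated.
from django.contrib.auth.models import User

def publish_post(author: User, title: str, url: str, markdown: str) -> Post:
    # Render the Markdown once at save time, mirroring the text_md/text_html
    # pair that Post stores.
    post = Post(
        author=author,
        title=title,
        url=url,
        text_md=markdown,
        text_html=parsedown(markdown),
    )
    post.save()
    # Notify the author's subscribers (Users linked through Follow).
    for subscriber in author.profile.subscriber.all():
        create_notify(user=subscriber, post=post, infomation='New post: ' + title)
    return post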
[ "Switch class SwitchApp(App): def build(self): switch = Switch() switch.bind(active=self.switch_state) return switch def switch_state(self,", "import App from kivy.uix.switch import Switch class SwitchApp(App): def build(self): switch = Switch()", "kivy.uix.switch import Switch class SwitchApp(App): def build(self): switch = Switch() switch.bind(active=self.switch_state) return switch", "from kivy.app import App from kivy.uix.switch import Switch class SwitchApp(App): def build(self): switch", "<gh_stars>1-10 from kivy.app import App from kivy.uix.switch import Switch class SwitchApp(App): def build(self):", "SwitchApp(App): def build(self): switch = Switch() switch.bind(active=self.switch_state) return switch def switch_state(self, instance, value):", "build(self): switch = Switch() switch.bind(active=self.switch_state) return switch def switch_state(self, instance, value): print('Switch is',", "def build(self): switch = Switch() switch.bind(active=self.switch_state) return switch def switch_state(self, instance, value): print('Switch", "from kivy.uix.switch import Switch class SwitchApp(App): def build(self): switch = Switch() switch.bind(active=self.switch_state) return", "class SwitchApp(App): def build(self): switch = Switch() switch.bind(active=self.switch_state) return switch def switch_state(self, instance,", "App from kivy.uix.switch import Switch class SwitchApp(App): def build(self): switch = Switch() switch.bind(active=self.switch_state)", "switch = Switch() switch.bind(active=self.switch_state) return switch def switch_state(self, instance, value): print('Switch is', value)", "kivy.app import App from kivy.uix.switch import Switch class SwitchApp(App): def build(self): switch =", "import Switch class SwitchApp(App): def build(self): switch = Switch() switch.bind(active=self.switch_state) return switch def", "= Switch() switch.bind(active=self.switch_state) return switch def switch_state(self, instance, value): print('Switch is', value) SwitchApp().run()" ]
[ "# @lc app=leetcode.cn id=559 lang=python3 # # [559] N叉树的最大深度 # class Node: def", "int: if root is None: return 0 # 没有子树,只有根节点1层 if root.children is None", "Node: def __init__(self, val, children): self.val = val self.children = children class Solution:", "<reponame>fengbaoheng/leetcode # # @lc app=leetcode.cn id=559 lang=python3 # # [559] N叉树的最大深度 # class", "maxDepth(self, root: 'Node') -> int: if root is None: return 0 # 没有子树,只有根节点1层", "class Node: def __init__(self, val, children): self.val = val self.children = children class", "# 递归子树求深度 def maxDepth(self, root: 'Node') -> int: if root is None: return", "if root is None: return 0 # 没有子树,只有根节点1层 if root.children is None or", "# # [559] N叉树的最大深度 # class Node: def __init__(self, val, children): self.val =", "@lc app=leetcode.cn id=559 lang=python3 # # [559] N叉树的最大深度 # class Node: def __init__(self,", "self.val = val self.children = children class Solution: # 递归子树求深度 def maxDepth(self, root:", "or len(root.children) == 0: return 1 # 对每颗子树递归调用求最大深度 return 1 + max(map(self.maxDepth, root.children))", "self.children = children class Solution: # 递归子树求深度 def maxDepth(self, root: 'Node') -> int:", "root: 'Node') -> int: if root is None: return 0 # 没有子树,只有根节点1层 if", "None: return 0 # 没有子树,只有根节点1层 if root.children is None or len(root.children) == 0:", "is None or len(root.children) == 0: return 1 # 对每颗子树递归调用求最大深度 return 1 +", "# [559] N叉树的最大深度 # class Node: def __init__(self, val, children): self.val = val", "= val self.children = children class Solution: # 递归子树求深度 def maxDepth(self, root: 'Node')", "val self.children = children class Solution: # 递归子树求深度 def maxDepth(self, root: 'Node') ->", "# class Node: def __init__(self, val, children): self.val = val self.children = children", "class Solution: # 递归子树求深度 def maxDepth(self, root: 'Node') -> int: if root is", "Solution: # 递归子树求深度 def maxDepth(self, root: 'Node') -> int: if root is None:", "lang=python3 # # [559] N叉树的最大深度 # class Node: def __init__(self, val, children): self.val", "N叉树的最大深度 # class Node: def __init__(self, val, children): self.val = val self.children =", "is None: return 0 # 没有子树,只有根节点1层 if root.children is None or len(root.children) ==", "app=leetcode.cn id=559 lang=python3 # # [559] N叉树的最大深度 # class Node: def __init__(self, val,", "0 # 没有子树,只有根节点1层 if root.children is None or len(root.children) == 0: return 1", "children): self.val = val self.children = children class Solution: # 递归子树求深度 def maxDepth(self,", "root.children is None or len(root.children) == 0: return 1 # 对每颗子树递归调用求最大深度 return 1", "= children class Solution: # 递归子树求深度 def maxDepth(self, root: 'Node') -> int: if", "def __init__(self, val, children): self.val = val self.children = children class Solution: #", "def maxDepth(self, root: 'Node') -> int: if root is None: return 0 #", "return 0 # 没有子树,只有根节点1层 if root.children is None or len(root.children) == 0: return", "if root.children is None or len(root.children) == 0: return 1 # 对每颗子树递归调用求最大深度 return", "[559] N叉树的最大深度 # class Node: def __init__(self, val, children): self.val = val self.children", "__init__(self, val, children): self.val = val self.children = children class Solution: # 递归子树求深度", "# # @lc app=leetcode.cn id=559 lang=python3 # # [559] N叉树的最大深度 # class Node:", "id=559 lang=python3 # # [559] N叉树的最大深度 # class Node: def __init__(self, val, children):", "-> int: if root is None: return 0 # 没有子树,只有根节点1层 if root.children is", "val, children): self.val = val self.children = children class Solution: # 递归子树求深度 def", "root is None: return 0 # 没有子树,只有根节点1层 if 
root.children is None or len(root.children)", "# 没有子树,只有根节点1层 if root.children is None or len(root.children) == 0: return 1 #", "children class Solution: # 递归子树求深度 def maxDepth(self, root: 'Node') -> int: if root", "'Node') -> int: if root is None: return 0 # 没有子树,只有根节点1层 if root.children", "递归子树求深度 def maxDepth(self, root: 'Node') -> int: if root is None: return 0", "None or len(root.children) == 0: return 1 # 对每颗子树递归调用求最大深度 return 1 + max(map(self.maxDepth,", "没有子树,只有根节点1层 if root.children is None or len(root.children) == 0: return 1 # 对每颗子树递归调用求最大深度" ]
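# An equivalent iterative formulation (a sketch, not part of the original
# solution): breadth-first traversal counts one level per full sweep of the
# queue, which avoids deep recursion on degenerate trees.
from collections import deque


def max_depth_bfs(root: 'Node') -> int:
    if root is None:
        return 0
    depth, queue = 0, deque([root])
    while queue:
        depth += 1                        # one full sweep == one level
        for _ in range(len(queue)):
            node = queue.popleft()
            queue.extend(node.children or [])
    return depth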
[ "\"0.0.0.0:8000\" # workers = multiprocessing.cpu_count() * 2 + 1 workers = 2 threads", "multiprocessing bind = \"0.0.0.0:8000\" # workers = multiprocessing.cpu_count() * 2 + 1 workers", "import multiprocessing bind = \"0.0.0.0:8000\" # workers = multiprocessing.cpu_count() * 2 + 1", "# workers = multiprocessing.cpu_count() * 2 + 1 workers = 2 threads =", "= multiprocessing.cpu_count() * 2 + 1 workers = 2 threads = 2 backlog", "= \"0.0.0.0:8000\" # workers = multiprocessing.cpu_count() * 2 + 1 workers = 2", "# import multiprocessing bind = \"0.0.0.0:8000\" # workers = multiprocessing.cpu_count() * 2 +", "workers = multiprocessing.cpu_count() * 2 + 1 workers = 2 threads = 2", "* 2 + 1 workers = 2 threads = 2 backlog = 4096", "multiprocessing.cpu_count() * 2 + 1 workers = 2 threads = 2 backlog =", "bind = \"0.0.0.0:8000\" # workers = multiprocessing.cpu_count() * 2 + 1 workers =" ]
[ "GetServiceDefinition class TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method = GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection)", "import TestBase from healthvaultlib.methods.getservicedefinition import GetServiceDefinition class TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method = GetServiceDefinition(['platform',", "'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) self.assertIsNotNone(method.response.service_definition.platform) self.assertIsNotNone(method.response.service_definition.shell) self.assertNotEqual(len(method.response.service_definition.xml_method), 0) self.assertNotEqual(len(method.response.service_definition.common_schema), 0) self.assertNotEqual(len(method.response.service_definition.instances), 0)", "TestBase from healthvaultlib.methods.getservicedefinition import GetServiceDefinition class TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method = GetServiceDefinition(['platform', 'shell',", "'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) self.assertIsNotNone(method.response.service_definition.platform) self.assertIsNotNone(method.response.service_definition.shell) self.assertNotEqual(len(method.response.service_definition.xml_method), 0) self.assertNotEqual(len(method.response.service_definition.common_schema), 0) self.assertNotEqual(len(method.response.service_definition.instances), 0) self.assertIsNotNone(method.response.service_definition.meaningful_use)", "def test_getservicedefinition(self): method = GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) self.assertIsNotNone(method.response.service_definition.platform) self.assertIsNotNone(method.response.service_definition.shell)", "from healthvaultlib.tests.testbase import TestBase from healthvaultlib.methods.getservicedefinition import GetServiceDefinition class TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method", "test_getservicedefinition(self): method = GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) self.assertIsNotNone(method.response.service_definition.platform) self.assertIsNotNone(method.response.service_definition.shell) self.assertNotEqual(len(method.response.service_definition.xml_method),", "'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) self.assertIsNotNone(method.response.service_definition.platform) self.assertIsNotNone(method.response.service_definition.shell) self.assertNotEqual(len(method.response.service_definition.xml_method), 0) self.assertNotEqual(len(method.response.service_definition.common_schema), 0) self.assertNotEqual(len(method.response.service_definition.instances), 0) self.assertIsNotNone(method.response.service_definition.meaningful_use) self.assertIsNotNone(method.response.service_definition.updated_date)", "TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method = GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) 
self.assertIsNotNone(method.response.service_definition.platform)", "class TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method = GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response)", "GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) self.assertIsNotNone(method.response.service_definition.platform) self.assertIsNotNone(method.response.service_definition.shell) self.assertNotEqual(len(method.response.service_definition.xml_method), 0) self.assertNotEqual(len(method.response.service_definition.common_schema), 0)", "from healthvaultlib.methods.getservicedefinition import GetServiceDefinition class TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method = GetServiceDefinition(['platform', 'shell', 'topology',", "method = GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) self.assertIsNotNone(method.response.service_definition.platform) self.assertIsNotNone(method.response.service_definition.shell) self.assertNotEqual(len(method.response.service_definition.xml_method), 0)", "import GetServiceDefinition class TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method = GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use'])", "'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) self.assertIsNotNone(method.response.service_definition.platform) self.assertIsNotNone(method.response.service_definition.shell) self.assertNotEqual(len(method.response.service_definition.xml_method), 0) self.assertNotEqual(len(method.response.service_definition.common_schema), 0) self.assertNotEqual(len(method.response.service_definition.instances),", "healthvaultlib.tests.testbase import TestBase from healthvaultlib.methods.getservicedefinition import GetServiceDefinition class TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method =", "healthvaultlib.methods.getservicedefinition import GetServiceDefinition class TestGetServiceDefinition(TestBase): def test_getservicedefinition(self): method = GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods',", "= GetServiceDefinition(['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']) method.execute(self.connection) self.assertIsNotNone(method.response) self.assertIsNotNone(method.response.service_definition.platform) self.assertIsNotNone(method.response.service_definition.shell) self.assertNotEqual(len(method.response.service_definition.xml_method), 0) self.assertNotEqual(len(method.response.service_definition.common_schema)," ]
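# The test leans on TestBase supplying an authenticated `self.connection`, and
# the section names passed to GetServiceDefinition select which parts of the
# service definition the platform returns, which is why each assertion maps
# onto a requested section.  Run it with the standard runner, e.g.
#
#     python -m unittest healthvaultlib.tests.test_getservicedefinition
#
# (module path assumed from the import above).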
[ "=1 pi = math.pi inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius)", "inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR", "#CLEAR SCREEN os.system(\"cls\") #PRINT DATA print(\" Radius:\", inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\",", "@IntSPstudio #|==============================================================|# #SYSTEM import os import sys #import time import turtle import math", "print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl) print(\" Slice Rl:\", inputCircleSliceRl) print(\" %Rld:\", inputCircleSliceRl", "julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius) julle.right(180 - inputCircleSliceAngle) julle.forward(inputCircleRadius) julle.right(180) julle.forward(inputCircleRadius) #Wait contentscreen.mainloop()", "%Rld:\", inputCircleSliceRl / inputCircleRl *100) #ACTION #Start position julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle", "#TURTLE julle = turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT scriptFle = sys.argv[0] scriptCircleRadius = sys.argv[1]", "# Project Visual Street # ID: 980004006 # Twitter: @IntSPstudio #|==============================================================|# #SYSTEM import", "Street # ID: 980004006 # Twitter: @IntSPstudio #|==============================================================|# #SYSTEM import os import sys", "inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\") #PRINT", "= 2*pi*rlRadius return output #Laskee piiraan kehän koon def calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2", "math #ALG #Ympyrän kehän koko def calcCircleRl(rlRadius): #2PIR output = 2*pi*rlRadius return output", "contentscreen = turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE julle = turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT scriptFle =", "Rl:\", inputCircleSliceRl) print(\" %Rld:\", inputCircleSliceRl / inputCircleRl *100) #ACTION #Start position julle.penup() julle.forward(inputCircleRadius)", "2*pi*rlRadius return output #Laskee piiraan kehän koon def calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2 return", "= sys.argv[1] scriptCircleSliceAngle = sys.argv[2] #BASIC VRB #systemContinuity =1 pi = math.pi inputCircleRadius", "julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius) julle.right(180 - inputCircleSliceAngle) julle.forward(inputCircleRadius) julle.right(180)", "math.pi inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius)", "print(\" Slice Rl:\", inputCircleSliceRl) print(\" %Rld:\", inputCircleSliceRl / inputCircleRl *100) #ACTION #Start position", "int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl = 
calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\")", "contentscreen.bgcolor(\"black\") #TURTLE julle = turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT scriptFle = sys.argv[0] scriptCircleRadius =", "ID: 980004006 # Twitter: @IntSPstudio #|==============================================================|# #SYSTEM import os import sys #import time", "Rl:\", inputCircleRl) print(\" Slice Rl:\", inputCircleSliceRl) print(\" %Rld:\", inputCircleSliceRl / inputCircleRl *100) #ACTION", "position julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius) julle.right(180 -", "980004006 # Twitter: @IntSPstudio #|==============================================================|# #SYSTEM import os import sys #import time import", "julle = turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT scriptFle = sys.argv[0] scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle", "Visual Street # ID: 980004006 # Twitter: @IntSPstudio #|==============================================================|# #SYSTEM import os import", "VRB #systemContinuity =1 pi = math.pi inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl", "Twitter: @IntSPstudio #|==============================================================|# #SYSTEM import os import sys #import time import turtle import", "SCREEN os.system(\"cls\") #PRINT DATA print(\" Radius:\", inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl)", "julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius) julle.right(180 - inputCircleSliceAngle)", "inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\") #PRINT DATA print(\" Radius:\", inputCircleRadius) print(\" Slice:\",", "Made by IntSPstudio # Project Visual Street # ID: 980004006 # Twitter: @IntSPstudio", "= math.pi inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl =", "IntSPstudio # Project Visual Street # ID: 980004006 # Twitter: @IntSPstudio #|==============================================================|# #SYSTEM", "#PRINT DATA print(\" Radius:\", inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl) print(\" Slice", "#Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius) julle.right(180 - inputCircleSliceAngle) julle.forward(inputCircleRadius) julle.right(180) julle.forward(inputCircleRadius) #Wait contentscreen.mainloop() os.system(\"cls\")", "turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE julle = turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT scriptFle = sys.argv[0] scriptCircleRadius", "#Ympyrän kehän koko def calcCircleRl(rlRadius): #2PIR output = 2*pi*rlRadius return output #Laskee piiraan", "scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle = sys.argv[2] #BASIC VRB #systemContinuity =1 pi = math.pi", "def calcCircleRl(rlRadius): #2PIR output = 2*pi*rlRadius return output #Laskee piiraan kehän koon def", "Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl) print(\" Slice Rl:\", inputCircleSliceRl) print(\" %Rld:\", 
inputCircleSliceRl /", "calcCircleRl(rlRadius): #2PIR output = 2*pi*rlRadius return output #Laskee piiraan kehän koon def calcCircleSliceRl(rlAngle,rlRadius):", "return output #CONTENT SCREEN contentscreen = turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE julle = turtle.Turtle() julle.color(\"white\")", "import math #ALG #Ympyrän kehän koko def calcCircleRl(rlRadius): #2PIR output = 2*pi*rlRadius return", "inputCircleRl *100) #ACTION #Start position julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice julle.pendown()", "= turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT scriptFle = sys.argv[0] scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle =", "= int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\") #PRINT DATA", "scriptCircleSliceAngle = sys.argv[2] #BASIC VRB #systemContinuity =1 pi = math.pi inputCircleRadius = int(scriptCircleRadius)", "= rlAngle/360*pi*rlRadius*2 return output #CONTENT SCREEN contentscreen = turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE julle =", "print(\" %Rld:\", inputCircleSliceRl / inputCircleRl *100) #ACTION #Start position julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown()", "calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\") #PRINT DATA print(\" Radius:\", inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle", "#|==============================================================|# # Made by IntSPstudio # Project Visual Street # ID: 980004006 #", "# ID: 980004006 # Twitter: @IntSPstudio #|==============================================================|# #SYSTEM import os import sys #import", "by IntSPstudio # Project Visual Street # ID: 980004006 # Twitter: @IntSPstudio #|==============================================================|#", "DATA print(\" Radius:\", inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl) print(\" Slice Rl:\",", "#Start position julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius) julle.right(180", "inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\") #PRINT DATA print(\" Radius:\",", "Project Visual Street # ID: 980004006 # Twitter: @IntSPstudio #|==============================================================|# #SYSTEM import os", "time import turtle import math #ALG #Ympyrän kehän koko def calcCircleRl(rlRadius): #2PIR output", "julle.speed(5) #INPUT scriptFle = sys.argv[0] scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle = sys.argv[2] #BASIC VRB", "int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\") #PRINT DATA print(\"", "*100) #ACTION #Start position julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90)", "output = 2*pi*rlRadius return output #Laskee piiraan kehän koon def calcCircleSliceRl(rlAngle,rlRadius): output =", "#CONTENT SCREEN contentscreen = turtle.Screen() 
contentscreen.bgcolor(\"black\") #TURTLE julle = turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT", "#Circle julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius) julle.right(180 - inputCircleSliceAngle) julle.forward(inputCircleRadius) julle.right(180) julle.forward(inputCircleRadius) #Wait", "output #Laskee piiraan kehän koon def calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2 return output #CONTENT", "#ALG #Ympyrän kehän koko def calcCircleRl(rlRadius): #2PIR output = 2*pi*rlRadius return output #Laskee", "koko def calcCircleRl(rlRadius): #2PIR output = 2*pi*rlRadius return output #Laskee piiraan kehän koon", "scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl) print(\" Slice Rl:\", inputCircleSliceRl) print(\" %Rld:\", inputCircleSliceRl / inputCircleRl", "rlAngle/360*pi*rlRadius*2 return output #CONTENT SCREEN contentscreen = turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE julle = turtle.Turtle()", "calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2 return output #CONTENT SCREEN contentscreen = turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE", "inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl) print(\" Slice Rl:\", inputCircleSliceRl) print(\" %Rld:\",", "os import sys #import time import turtle import math #ALG #Ympyrän kehän koko", "Radius:\", inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl) print(\" Slice Rl:\", inputCircleSliceRl) print(\"", "inputCircleSliceRl) print(\" %Rld:\", inputCircleSliceRl / inputCircleRl *100) #ACTION #Start position julle.penup() julle.forward(inputCircleRadius) julle.left(90)", "sys.argv[2] #BASIC VRB #systemContinuity =1 pi = math.pi inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle =", "return output #Laskee piiraan kehän koon def calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2 return output", "= turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE julle = turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT scriptFle = sys.argv[0]", "#INPUT scriptFle = sys.argv[0] scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle = sys.argv[2] #BASIC VRB #systemContinuity", "= calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\") #PRINT DATA print(\" Radius:\", inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle)", "#systemContinuity =1 pi = math.pi inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl =", "#|==============================================================|# #SYSTEM import os import sys #import time import turtle import math #ALG", "piiraan kehän koon def calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2 return output #CONTENT SCREEN contentscreen", "koon def calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2 return output #CONTENT SCREEN contentscreen = turtle.Screen()", "SCREEN contentscreen = turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE julle = turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT scriptFle", "# Twitter: @IntSPstudio #|==============================================================|# #SYSTEM import os import sys #import time import turtle", "calcCircleRl(inputCircleRadius) inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\") 
#PRINT DATA print(\" Radius:\", inputCircleRadius) print(\"", "#ACTION #Start position julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius)", "inputCircleRl) print(\" Slice Rl:\", inputCircleSliceRl) print(\" %Rld:\", inputCircleSliceRl / inputCircleRl *100) #ACTION #Start", "import os import sys #import time import turtle import math #ALG #Ympyrän kehän", "inputCircleSliceRl / inputCircleRl *100) #ACTION #Start position julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius)", "import sys #import time import turtle import math #ALG #Ympyrän kehän koko def", "Slice Rl:\", inputCircleSliceRl) print(\" %Rld:\", inputCircleSliceRl / inputCircleRl *100) #ACTION #Start position julle.penup()", "kehän koko def calcCircleRl(rlRadius): #2PIR output = 2*pi*rlRadius return output #Laskee piiraan kehän", "julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius) julle.right(180 - inputCircleSliceAngle) julle.forward(inputCircleRadius) julle.right(180) julle.forward(inputCircleRadius)", "#import time import turtle import math #ALG #Ympyrän kehän koko def calcCircleRl(rlRadius): #2PIR", "kehän koon def calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2 return output #CONTENT SCREEN contentscreen =", "= sys.argv[2] #BASIC VRB #systemContinuity =1 pi = math.pi inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle", "output = rlAngle/360*pi*rlRadius*2 return output #CONTENT SCREEN contentscreen = turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE julle", "sys.argv[0] scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle = sys.argv[2] #BASIC VRB #systemContinuity =1 pi =", "sys.argv[1] scriptCircleSliceAngle = sys.argv[2] #BASIC VRB #systemContinuity =1 pi = math.pi inputCircleRadius =", "os.system(\"cls\") #PRINT DATA print(\" Radius:\", inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl) print(\"", "output #CONTENT SCREEN contentscreen = turtle.Screen() contentscreen.bgcolor(\"black\") #TURTLE julle = turtle.Turtle() julle.color(\"white\") julle.speed(5)", "/ inputCircleRl *100) #ACTION #Start position julle.penup() julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice", "= int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN", "import turtle import math #ALG #Ympyrän kehän koko def calcCircleRl(rlRadius): #2PIR output =", "= sys.argv[0] scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle = sys.argv[2] #BASIC VRB #systemContinuity =1 pi", "#BASIC VRB #systemContinuity =1 pi = math.pi inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle)", "turtle.Turtle() julle.color(\"white\") julle.speed(5) #INPUT scriptFle = sys.argv[0] scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle = sys.argv[2]", "#SYSTEM import os import sys #import time import turtle import math #ALG #Ympyrän", "sys #import time import turtle import math #ALG #Ympyrän kehän koko def calcCircleRl(rlRadius):", "def calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2 return output #CONTENT SCREEN contentscreen 
= turtle.Screen() contentscreen.bgcolor(\"black\")", "#Laskee piiraan kehän koon def calcCircleSliceRl(rlAngle,rlRadius): output = rlAngle/360*pi*rlRadius*2 return output #CONTENT SCREEN", "print(\" Radius:\", inputCircleRadius) print(\" Slice:\", scriptCircleSliceAngle) print(\"Circle Rl:\", inputCircleRl) print(\" Slice Rl:\", inputCircleSliceRl)", "turtle import math #ALG #Ympyrän kehän koko def calcCircleRl(rlRadius): #2PIR output = 2*pi*rlRadius", "#2PIR output = 2*pi*rlRadius return output #Laskee piiraan kehän koon def calcCircleSliceRl(rlAngle,rlRadius): output", "julle.color(\"white\") julle.speed(5) #INPUT scriptFle = sys.argv[0] scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle = sys.argv[2] #BASIC", "scriptFle = sys.argv[0] scriptCircleRadius = sys.argv[1] scriptCircleSliceAngle = sys.argv[2] #BASIC VRB #systemContinuity =1", "= calcCircleRl(inputCircleRadius) inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius) #CLEAR SCREEN os.system(\"cls\") #PRINT DATA print(\" Radius:\", inputCircleRadius)", "print(\"Circle Rl:\", inputCircleRl) print(\" Slice Rl:\", inputCircleSliceRl) print(\" %Rld:\", inputCircleSliceRl / inputCircleRl *100)", "pi = math.pi inputCircleRadius = int(scriptCircleRadius) inputCircleSliceAngle = int(scriptCircleSliceAngle) inputCircleRl = calcCircleRl(inputCircleRadius) inputCircleSliceRl", "julle.forward(inputCircleRadius) julle.left(90) julle.pendown() #Circle julle.circle(inputCircleRadius) #Slice julle.pendown() julle.left(90) julle.forward(inputCircleRadius) julle.right(180 - inputCircleSliceAngle) julle.forward(inputCircleRadius)", "# Made by IntSPstudio # Project Visual Street # ID: 980004006 # Twitter:" ]
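# Worked example (a sketch; the script filename is a placeholder):
#
#     python circle_slice.py 100 45
#
#     Circle Rl: 2*pi*100          ~ 628.32
#      Slice Rl: 45/360*pi*100*2   ~  78.54
#          %Rld: 78.54/628.32*100  =  12.5   (a 45-degree slice is 1/8 of the circle)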
[ "based on HSV range provided by COLOR_MIN and COLOR_MAX frame = cv2.inRange(hsv, COLOR_MIN,", "MIN_BOIL_AREA = 100 def cart2pol(a): x = a[0] y = a[1] rho =", "filteredContours[iTargetContour] M = cv2.moments(targetContour) cX = int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] /", "int(M[\"m01\"] / M[\"m00\"]) print(cX, \" \", cY) table.putNumber('cX', cX) table.putNumber('cY', cY) else: #", "x = rho * np.cos(phi) y = rho * np.sin(phi) return([x, y]) def", "if DEBUG: cv2.imshow('frame', frame) # find contours based on thresholded image _, contours,", "maxRightness = 0; # searches for index of most rightward contours for i", "150 MIN_BOIL_AREA = 100 def cart2pol(a): x = a[0] y = a[1] rho", "maxRightness = cX iTargetContour = i targetContour = filteredContours[iTargetContour] M = cv2.moments(targetContour) cX", "cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates array of contours larger than given min area", "return([rho, phi]) def pol2cart(a): rho = a[0] phi = a[1] x = rho", "cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates array of contours larger than given min area filteredContours", "rho * np.cos(phi) y = rho * np.sin(phi) return([x, y]) def trackHook(): #", "1 DEBUG = False # HOOK_TARGET_LENGTH = 51 # width of retroreflective tape,", "-1) # end of trackHook() cap = cv2.VideoCapture(HOOK_CAM_ID) while True: trackHook() if DEBUG:", "pol2cart(a): rho = a[0] phi = a[1] x = rho * np.cos(phi) y", "fx=0.5, fy=0.5) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg', frame) #", "COLOR_MIN and COLOR_MAX frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if DEBUG: cv2.imshow('frame', frame) #", "cX = int(M[\"m10\"] / M[\"m00\"]) if cX > maxRightness: maxRightness = cX iTargetContour", "array of contours larger than given min area filteredContours = [] for i", "# analyze centre X M = cv2.moments(filteredContours[i]) cX = int(M[\"m10\"] / M[\"m00\"]) if", "else: # if no contours found table.putNumber('cX', -1) table.putNumber('cY', -1) # end of", "x value iTargetContour = 0; maxRightness = 0; # searches for index of", "\", cY) table.putNumber('cX', cX) table.putNumber('cY', cY) else: # if no contours found table.putNumber('cX',", "width of retroreflective tape, in cm MIN_HOOK_AREA = 150 MIN_BOIL_AREA = 100 def", "filteredContours = [] for i in range(0, len(contours)): if cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i])", "a[1] x = rho * np.cos(phi) y = rho * np.sin(phi) return([x, y])", "y]) def trackHook(): # read image from camera, resize to 320x240, convert to", "NetworkTable.setClientMode() NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60, 80, 80]) COLOR_MAX = np.array([85,", "range provided by COLOR_MIN and COLOR_MAX frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if DEBUG:", "(highest x-val) contour from filtered contours if len(filteredContours) > 0: # default index", "/ M[\"m00\"]) print(cX, \" \", cY) table.putNumber('cX', cX) table.putNumber('cY', cY) else: # if", "cm MIN_HOOK_AREA = 150 MIN_BOIL_AREA = 100 def cart2pol(a): x = a[0] y", "(for lifecam 3000) FOV_PIXEL = 320 HOOK_CAM_ID = 0 BOIL_CAM_ID = 1 DEBUG", "= cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if DEBUG: cv2.imshow('frame', frame) # find contours based on", "a[0] phi = a[1] x = rho * np.cos(phi) y = rho *", "in range(0, len(filteredContours)): # analyze centre X M = cv2.moments(filteredContours[i]) cX = int(M[\"m10\"]", "for i in range(0, len(contours)): if 
cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds most", "int(M[\"m10\"] / M[\"m00\"]) if cX > maxRightness: maxRightness = cX iTargetContour = i", "numpy as np import cv2 from networktables import NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\")", "HOOK_CAM_ID = 0 BOIL_CAM_ID = 1 DEBUG = False # HOOK_TARGET_LENGTH = 51", "value iTargetContour = 0; maxRightness = 0; # searches for index of most", "if no contours found table.putNumber('cX', -1) table.putNumber('cY', -1) # end of trackHook() cap", "rho = a[0] phi = a[1] x = rho * np.cos(phi) y =", "contour from filtered contours if len(filteredContours) > 0: # default index and x", "0; # searches for index of most rightward contours for i in range(0,", "i in range(0, len(filteredContours)): # analyze centre X M = cv2.moments(filteredContours[i]) cX =", "np import cv2 from networktables import NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize()", "100 def cart2pol(a): x = a[0] y = a[1] rho = np.sqrt(x**2 +", "frame = cap.read() frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG:", "/ 6.283185307 # (for lifecam 3000) FOV_PIXEL = 320 HOOK_CAM_ID = 0 BOIL_CAM_ID", "camera, resize to 320x240, convert to HSV ret, frame = cap.read() frame=cv2.resize(frame, (0,0),", "FOV_PIXEL = 320 HOOK_CAM_ID = 0 BOIL_CAM_ID = 1 DEBUG = False #", "# find contours based on thresholded image _, contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL,", "# read image from camera, resize to 320x240, convert to HSV ret, frame", "* 360 / 6.283185307 # (for lifecam 3000) FOV_PIXEL = 320 HOOK_CAM_ID =", "<filename>OpenCV/goalTracker.py import os import numpy as np import cv2 from networktables import NetworkTable", "3000) FOV_PIXEL = 320 HOOK_CAM_ID = 0 BOIL_CAM_ID = 1 DEBUG = False", "range(0, len(filteredContours)): # analyze centre X M = cv2.moments(filteredContours[i]) cX = int(M[\"m10\"] /", "cv2.moments(filteredContours[i]) cX = int(M[\"m10\"] / M[\"m00\"]) if cX > maxRightness: maxRightness = cX", "cX iTargetContour = i targetContour = filteredContours[iTargetContour] M = cv2.moments(targetContour) cX = int(M[\"m10\"]", "and x value iTargetContour = 0; maxRightness = 0; # searches for index", "x) return([rho, phi]) def pol2cart(a): rho = a[0] phi = a[1] x =", "def trackHook(): # read image from camera, resize to 320x240, convert to HSV", "x-val) contour from filtered contours if len(filteredContours) > 0: # default index and", "= i targetContour = filteredContours[iTargetContour] M = cv2.moments(targetContour) cX = int(M[\"m10\"] / M[\"m00\"])", "table.putNumber('cY', -1) # end of trackHook() cap = cv2.VideoCapture(HOOK_CAM_ID) while True: trackHook() if", "searches for index of most rightward contours for i in range(0, len(filteredContours)): #", "import cv2 from networktables import NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table", "DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg', frame) # threshold image based on HSV range provided", "> MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds most rightward (highest x-val) contour from filtered contours", "* np.cos(phi) y = rho * np.sin(phi) return([x, y]) def trackHook(): # read", "to HSV ret, frame = 
cap.read() frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5) hsv = cv2.cvtColor(frame,", "= int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) print(cX, \" \", cY)", "_, contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates array of contours larger", "in range(0, len(contours)): if cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds most rightward (highest", "80, 80]) COLOR_MAX = np.array([85, 255, 255]) VIEW_ANGLE = 60 * 360 /", "import numpy as np import cv2 from networktables import NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\")", "analyze centre X M = cv2.moments(filteredContours[i]) cX = int(M[\"m10\"] / M[\"m00\"]) if cX", "320 HOOK_CAM_ID = 0 BOIL_CAM_ID = 1 DEBUG = False # HOOK_TARGET_LENGTH =", "end of trackHook() cap = cv2.VideoCapture(HOOK_CAM_ID) while True: trackHook() if DEBUG: cap.release() cv2.destroyAllWindows()", "COLOR_MIN = np.array([60, 80, 80]) COLOR_MAX = np.array([85, 255, 255]) VIEW_ANGLE = 60", "heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates array of contours larger than given", "= np.array([85, 255, 255]) VIEW_ANGLE = 60 * 360 / 6.283185307 # (for", "X M = cv2.moments(filteredContours[i]) cX = int(M[\"m10\"] / M[\"m00\"]) if cX > maxRightness:", "provided by COLOR_MIN and COLOR_MAX frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if DEBUG: cv2.imshow('frame',", "cY = int(M[\"m01\"] / M[\"m00\"]) print(cX, \" \", cY) table.putNumber('cX', cX) table.putNumber('cY', cY)", "False # HOOK_TARGET_LENGTH = 51 # width of retroreflective tape, in cm MIN_HOOK_AREA", "= 320 HOOK_CAM_ID = 0 BOIL_CAM_ID = 1 DEBUG = False # HOOK_TARGET_LENGTH", "from networktables import NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\")", "np.cos(phi) y = rho * np.sin(phi) return([x, y]) def trackHook(): # read image", "cX) table.putNumber('cY', cY) else: # if no contours found table.putNumber('cX', -1) table.putNumber('cY', -1)", "if len(filteredContours) > 0: # default index and x value iTargetContour = 0;", "frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg',", "trackHook(): # read image from camera, resize to 320x240, convert to HSV ret,", "COLOR_MAX) if DEBUG: cv2.imshow('frame', frame) # find contours based on thresholded image _,", "x = a[0] y = a[1] rho = np.sqrt(x**2 + y**2) phi =", "y**2) phi = np.arctan2(y, x) return([rho, phi]) def pol2cart(a): rho = a[0] phi", "= np.arctan2(y, x) return([rho, phi]) def pol2cart(a): rho = a[0] phi = a[1]", "= 0; # searches for index of most rightward contours for i in", "print(cX, \" \", cY) table.putNumber('cX', cX) table.putNumber('cY', cY) else: # if no contours", "contours based on thresholded image _, contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #", "(0,0), fx=0.5, fy=0.5) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg', frame)", "y = a[1] rho = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) return([rho,", "np.array([85, 255, 255]) VIEW_ANGLE = 60 * 360 / 6.283185307 # (for lifecam", "a[1] rho = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) return([rho, phi]) def", "phi]) def pol2cart(a): rho = a[0] phi = a[1] x = rho *", "0; maxRightness = 0; # 
searches for index of most rightward contours for", "rightward contours for i in range(0, len(filteredContours)): # analyze centre X M =", "M[\"m00\"]) if cX > maxRightness: maxRightness = cX iTargetContour = i targetContour =", "range(0, len(contours)): if cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds most rightward (highest x-val)", "contours if len(filteredContours) > 0: # default index and x value iTargetContour =", "targetContour = filteredContours[iTargetContour] M = cv2.moments(targetContour) cX = int(M[\"m10\"] / M[\"m00\"]) cY =", "= cv2.moments(filteredContours[i]) cX = int(M[\"m10\"] / M[\"m00\"]) if cX > maxRightness: maxRightness =", "most rightward (highest x-val) contour from filtered contours if len(filteredContours) > 0: #", "NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60, 80, 80]) COLOR_MAX = np.array([85, 255, 255]) VIEW_ANGLE =", "than given min area filteredContours = [] for i in range(0, len(contours)): if", "given min area filteredContours = [] for i in range(0, len(contours)): if cv2.contourArea(contours[i])", "COLOR_MAX = np.array([85, 255, 255]) VIEW_ANGLE = 60 * 360 / 6.283185307 #", "255]) VIEW_ANGLE = 60 * 360 / 6.283185307 # (for lifecam 3000) FOV_PIXEL", "NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60, 80, 80]) COLOR_MAX = np.array([85, 255,", "= 60 * 360 / 6.283185307 # (for lifecam 3000) FOV_PIXEL = 320", "table.putNumber('cX', -1) table.putNumber('cY', -1) # end of trackHook() cap = cv2.VideoCapture(HOOK_CAM_ID) while True:", "51 # width of retroreflective tape, in cm MIN_HOOK_AREA = 150 MIN_BOIL_AREA =", "NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60,", "HOOK_TARGET_LENGTH = 51 # width of retroreflective tape, in cm MIN_HOOK_AREA = 150", "networktables import NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN", "DEBUG = False # HOOK_TARGET_LENGTH = 51 # width of retroreflective tape, in", "area filteredContours = [] for i in range(0, len(contours)): if cv2.contourArea(contours[i]) > MIN_HOOK_AREA:", "based on thresholded image _, contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates", "table.putNumber('cX', cX) table.putNumber('cY', cY) else: # if no contours found table.putNumber('cX', -1) table.putNumber('cY',", "COLOR_MIN, COLOR_MAX) if DEBUG: cv2.imshow('frame', frame) # find contours based on thresholded image", "if DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg', frame) # threshold image based on HSV range", "for index of most rightward contours for i in range(0, len(filteredContours)): # analyze", "from camera, resize to 320x240, convert to HSV ret, frame = cap.read() frame=cv2.resize(frame,", "/home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60, 80, 80]) COLOR_MAX", "# creates array of contours larger than given min area filteredContours = []", "# searches for index of most rightward contours for i in range(0, len(filteredContours)):", 
"255, 255]) VIEW_ANGLE = 60 * 360 / 6.283185307 # (for lifecam 3000)", "y = rho * np.sin(phi) return([x, y]) def trackHook(): # read image from", "np.arctan2(y, x) return([rho, phi]) def pol2cart(a): rho = a[0] phi = a[1] x", "cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg', frame) # threshold image based on HSV", "contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates array of contours larger than", "in cm MIN_HOOK_AREA = 150 MIN_BOIL_AREA = 100 def cart2pol(a): x = a[0]", "= 0; maxRightness = 0; # searches for index of most rightward contours", "/ M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) print(cX, \" \", cY) table.putNumber('cX', cX)", "cX > maxRightness: maxRightness = cX iTargetContour = i targetContour = filteredContours[iTargetContour] M", "= 100 def cart2pol(a): x = a[0] y = a[1] rho = np.sqrt(x**2", "= cv2.moments(targetContour) cX = int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) print(cX,", "cY) table.putNumber('cX', cX) table.putNumber('cY', cY) else: # if no contours found table.putNumber('cX', -1)", "= cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg', frame) # threshold image based", "MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds most rightward (highest x-val) contour from filtered contours if", "filteredContours.append(contours[i]) # finds most rightward (highest x-val) contour from filtered contours if len(filteredContours)", "os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60, 80,", "found table.putNumber('cX', -1) table.putNumber('cY', -1) # end of trackHook() cap = cv2.VideoCapture(HOOK_CAM_ID) while", "= 1 DEBUG = False # HOOK_TARGET_LENGTH = 51 # width of retroreflective", "VIEW_ANGLE = 60 * 360 / 6.283185307 # (for lifecam 3000) FOV_PIXEL =", "hsv) cv2.imshow('brg', frame) # threshold image based on HSV range provided by COLOR_MIN", "import NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN =", "60 * 360 / 6.283185307 # (for lifecam 3000) FOV_PIXEL = 320 HOOK_CAM_ID", "phi = a[1] x = rho * np.cos(phi) y = rho * np.sin(phi)", "min area filteredContours = [] for i in range(0, len(contours)): if cv2.contourArea(contours[i]) >", "if cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds most rightward (highest x-val) contour from", "= np.array([60, 80, 80]) COLOR_MAX = np.array([85, 255, 255]) VIEW_ANGLE = 60 *", "centre X M = cv2.moments(filteredContours[i]) cX = int(M[\"m10\"] / M[\"m00\"]) if cX >", "fy=0.5) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg', frame) # threshold", "frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if DEBUG: cv2.imshow('frame', frame) # find contours based", "= int(M[\"m10\"] / M[\"m00\"]) if cX > maxRightness: maxRightness = cX iTargetContour =", "NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60, 80, 80]) COLOR_MAX =", "rho * np.sin(phi) return([x, y]) def trackHook(): # read image from camera, resize", "= 
cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates array of contours larger than given min", "= 150 MIN_BOIL_AREA = 100 def cart2pol(a): x = a[0] y = a[1]", "index and x value iTargetContour = 0; maxRightness = 0; # searches for", "= a[1] rho = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) return([rho, phi])", "frame) # find contours based on thresholded image _, contours, heirarchy = cv2.findContours(frame,", "of most rightward contours for i in range(0, len(filteredContours)): # analyze centre X", "for i in range(0, len(filteredContours)): # analyze centre X M = cv2.moments(filteredContours[i]) cX", "M = cv2.moments(targetContour) cX = int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"])", "0 BOIL_CAM_ID = 1 DEBUG = False # HOOK_TARGET_LENGTH = 51 # width", "= cX iTargetContour = i targetContour = filteredContours[iTargetContour] M = cv2.moments(targetContour) cX =", "phi = np.arctan2(y, x) return([rho, phi]) def pol2cart(a): rho = a[0] phi =", "6.283185307 # (for lifecam 3000) FOV_PIXEL = 320 HOOK_CAM_ID = 0 BOIL_CAM_ID =", "MIN_HOOK_AREA = 150 MIN_BOIL_AREA = 100 def cart2pol(a): x = a[0] y =", "= False # HOOK_TARGET_LENGTH = 51 # width of retroreflective tape, in cm", "360 / 6.283185307 # (for lifecam 3000) FOV_PIXEL = 320 HOOK_CAM_ID = 0", "to 320x240, convert to HSV ret, frame = cap.read() frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5)", "> 0: # default index and x value iTargetContour = 0; maxRightness =", "= a[0] y = a[1] rho = np.sqrt(x**2 + y**2) phi = np.arctan2(y,", "= np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) return([rho, phi]) def pol2cart(a): rho", "resize to 320x240, convert to HSV ret, frame = cap.read() frame=cv2.resize(frame, (0,0), fx=0.5,", "# finds most rightward (highest x-val) contour from filtered contours if len(filteredContours) >", "i in range(0, len(contours)): if cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds most rightward", "[] for i in range(0, len(contours)): if cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds", "def cart2pol(a): x = a[0] y = a[1] rho = np.sqrt(x**2 + y**2)", "frame) # threshold image based on HSV range provided by COLOR_MIN and COLOR_MAX", "cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if DEBUG: cv2.imshow('frame', frame) # find contours based on thresholded", "on thresholded image _, contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates array", "find contours based on thresholded image _, contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)", "= NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60, 80, 80]) COLOR_MAX = np.array([85, 255, 255]) VIEW_ANGLE", "image based on HSV range provided by COLOR_MIN and COLOR_MAX frame = cv2.inRange(hsv,", "cv2.imshow('frame', frame) # find contours based on thresholded image _, contours, heirarchy =", "most rightward contours for i in range(0, len(filteredContours)): # analyze centre X M", "M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) print(cX, \" \", cY) table.putNumber('cX', cX) table.putNumber('cY',", "len(filteredContours)): # analyze centre X M = cv2.moments(filteredContours[i]) cX = int(M[\"m10\"] / M[\"m00\"])", "and COLOR_MAX frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if DEBUG: cv2.imshow('frame', frame) # find", "convert to HSV ret, frame = cap.read() frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5) hsv =", "BOIL_CAM_ID = 1 DEBUG = False # HOOK_TARGET_LENGTH = 51 # width 
of", "COLOR_MAX frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if DEBUG: cv2.imshow('frame', frame) # find contours", "HSV ret, frame = cap.read() frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)", "= a[1] x = rho * np.cos(phi) y = rho * np.sin(phi) return([x,", "= rho * np.sin(phi) return([x, y]) def trackHook(): # read image from camera,", "320x240, convert to HSV ret, frame = cap.read() frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5) hsv", "0: # default index and x value iTargetContour = 0; maxRightness = 0;", "cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds most rightward (highest x-val) contour from filtered", "if cX > maxRightness: maxRightness = cX iTargetContour = i targetContour = filteredContours[iTargetContour]", "= 51 # width of retroreflective tape, in cm MIN_HOOK_AREA = 150 MIN_BOIL_AREA", "def pol2cart(a): rho = a[0] phi = a[1] x = rho * np.cos(phi)", "retroreflective tape, in cm MIN_HOOK_AREA = 150 MIN_BOIL_AREA = 100 def cart2pol(a): x", "image from camera, resize to 320x240, convert to HSV ret, frame = cap.read()", "contours found table.putNumber('cX', -1) table.putNumber('cY', -1) # end of trackHook() cap = cv2.VideoCapture(HOOK_CAM_ID)", "= rho * np.cos(phi) y = rho * np.sin(phi) return([x, y]) def trackHook():", "a[0] y = a[1] rho = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x)", "table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60, 80, 80]) COLOR_MAX = np.array([85, 255, 255])", "default index and x value iTargetContour = 0; maxRightness = 0; # searches", "larger than given min area filteredContours = [] for i in range(0, len(contours)):", "i targetContour = filteredContours[iTargetContour] M = cv2.moments(targetContour) cX = int(M[\"m10\"] / M[\"m00\"]) cY", "int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) print(cX, \" \", cY) table.putNumber('cX',", "M[\"m00\"]) print(cX, \" \", cY) table.putNumber('cX', cX) table.putNumber('cY', cY) else: # if no", "# end of trackHook() cap = cv2.VideoCapture(HOOK_CAM_ID) while True: trackHook() if DEBUG: cap.release()", "> maxRightness: maxRightness = cX iTargetContour = i targetContour = filteredContours[iTargetContour] M =", "M = cv2.moments(filteredContours[i]) cX = int(M[\"m10\"] / M[\"m00\"]) if cX > maxRightness: maxRightness", "= [] for i in range(0, len(contours)): if cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i]) #", "\" \", cY) table.putNumber('cX', cX) table.putNumber('cY', cY) else: # if no contours found", "# HOOK_TARGET_LENGTH = 51 # width of retroreflective tape, in cm MIN_HOOK_AREA =", "thresholded image _, contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates array of", "= filteredContours[iTargetContour] M = cv2.moments(targetContour) cX = int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"]", "no contours found table.putNumber('cX', -1) table.putNumber('cY', -1) # end of trackHook() cap =", "iTargetContour = i targetContour = filteredContours[iTargetContour] M = cv2.moments(targetContour) cX = int(M[\"m10\"] /", "len(contours)): if cv2.contourArea(contours[i]) > MIN_HOOK_AREA: filteredContours.append(contours[i]) # finds most rightward (highest x-val) contour", "cv2.imshow('brg', frame) # threshold image based on HSV range provided by COLOR_MIN and", "HSV range provided by COLOR_MIN and COLOR_MAX frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if", "# (for lifecam 3000) FOV_PIXEL = 320 HOOK_CAM_ID = 0 BOIL_CAM_ID 
= 1", "contours for i in range(0, len(filteredContours)): # analyze centre X M = cv2.moments(filteredContours[i])", "from filtered contours if len(filteredContours) > 0: # default index and x value", "as np import cv2 from networktables import NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode()", "/ M[\"m00\"]) if cX > maxRightness: maxRightness = cX iTargetContour = i targetContour", "rho = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) return([rho, phi]) def pol2cart(a):", "+ y**2) phi = np.arctan2(y, x) return([rho, phi]) def pol2cart(a): rho = a[0]", "= cap.read() frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv',", "on HSV range provided by COLOR_MIN and COLOR_MAX frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX)", "by COLOR_MIN and COLOR_MAX frame = cv2.inRange(hsv, COLOR_MIN, COLOR_MAX) if DEBUG: cv2.imshow('frame', frame)", "cv2.moments(targetContour) cX = int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) print(cX, \"", "= a[0] phi = a[1] x = rho * np.cos(phi) y = rho", "# width of retroreflective tape, in cm MIN_HOOK_AREA = 150 MIN_BOIL_AREA = 100", "cap.read() frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv', hsv)", "import os import numpy as np import cv2 from networktables import NetworkTable os.system(\"sudo", "# if no contours found table.putNumber('cX', -1) table.putNumber('cY', -1) # end of trackHook()", "80]) COLOR_MAX = np.array([85, 255, 255]) VIEW_ANGLE = 60 * 360 / 6.283185307", "filtered contours if len(filteredContours) > 0: # default index and x value iTargetContour", "ret, frame = cap.read() frame=cv2.resize(frame, (0,0), fx=0.5, fy=0.5) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if", "len(filteredContours) > 0: # default index and x value iTargetContour = 0; maxRightness", "finds most rightward (highest x-val) contour from filtered contours if len(filteredContours) > 0:", "-1) table.putNumber('cY', -1) # end of trackHook() cap = cv2.VideoCapture(HOOK_CAM_ID) while True: trackHook()", "index of most rightward contours for i in range(0, len(filteredContours)): # analyze centre", "= int(M[\"m01\"] / M[\"m00\"]) print(cX, \" \", cY) table.putNumber('cX', cX) table.putNumber('cY', cY) else:", "return([x, y]) def trackHook(): # read image from camera, resize to 320x240, convert", "bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table = NetworkTable.getTable(\"HookContoursReport\") COLOR_MIN = np.array([60, 80, 80])", "= 0 BOIL_CAM_ID = 1 DEBUG = False # HOOK_TARGET_LENGTH = 51 #", "rightward (highest x-val) contour from filtered contours if len(filteredContours) > 0: # default", "np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) return([rho, phi]) def pol2cart(a): rho =", "iTargetContour = 0; maxRightness = 0; # searches for index of most rightward", "contours larger than given min area filteredContours = [] for i in range(0,", "table.putNumber('cY', cY) else: # if no contours found table.putNumber('cX', -1) table.putNumber('cY', -1) #", "of contours larger than given min area filteredContours = [] for i in", "np.sin(phi) return([x, y]) def trackHook(): # read image from camera, resize to 320x240,", "image _, contours, heirarchy = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # creates array of contours", "tape, in cm 
MIN_HOOK_AREA = 150 MIN_BOIL_AREA = 100 def cart2pol(a): x =", "maxRightness: maxRightness = cX iTargetContour = i targetContour = filteredContours[iTargetContour] M = cv2.moments(targetContour)", "# threshold image based on HSV range provided by COLOR_MIN and COLOR_MAX frame", "* np.sin(phi) return([x, y]) def trackHook(): # read image from camera, resize to", "cX = int(M[\"m10\"] / M[\"m00\"]) cY = int(M[\"m01\"] / M[\"m00\"]) print(cX, \" \",", "threshold image based on HSV range provided by COLOR_MIN and COLOR_MAX frame =", "read image from camera, resize to 320x240, convert to HSV ret, frame =", "of retroreflective tape, in cm MIN_HOOK_AREA = 150 MIN_BOIL_AREA = 100 def cart2pol(a):", "cart2pol(a): x = a[0] y = a[1] rho = np.sqrt(x**2 + y**2) phi", "os import numpy as np import cv2 from networktables import NetworkTable os.system(\"sudo bash", "cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg', frame) # threshold image based on", "np.array([60, 80, 80]) COLOR_MAX = np.array([85, 255, 255]) VIEW_ANGLE = 60 * 360", "creates array of contours larger than given min area filteredContours = [] for", "hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) if DEBUG: cv2.imshow('hsv', hsv) cv2.imshow('brg', frame) # threshold image", "DEBUG: cv2.imshow('frame', frame) # find contours based on thresholded image _, contours, heirarchy", "cv2.imshow('hsv', hsv) cv2.imshow('brg', frame) # threshold image based on HSV range provided by", "lifecam 3000) FOV_PIXEL = 320 HOOK_CAM_ID = 0 BOIL_CAM_ID = 1 DEBUG =", "cv2.CHAIN_APPROX_SIMPLE) # creates array of contours larger than given min area filteredContours =", "# default index and x value iTargetContour = 0; maxRightness = 0; #", "cv2 from networktables import NetworkTable os.system(\"sudo bash /home/pi/vision/init.sh\") NetworkTable.setIPAddress(\"roboRIO-4914-FRC.local\") NetworkTable.setClientMode() NetworkTable.initialize() table =", "cY) else: # if no contours found table.putNumber('cX', -1) table.putNumber('cY', -1) # end" ]
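A portability note on the contour step above: cv2.findContours returns three values under OpenCV 3.x, as unpacked in the script, but only two (contours, hierarchy) under OpenCV 4.x; the centroid itself comes from the spatial image moments, cX = m10/m00 and cY = m01/m00. A minimal version-agnostic sketch (an adaptation, not part of the original script; frame is assumed to be the thresholded binary image):

# Works under both OpenCV 3.x and 4.x (assumed adaptation):
res = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = res[1] if len(res) == 3 else res[0]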
[ "UserDAL(): def __init__(self, db_session: Session): self.db_session = db_session async def get_user_by_email(self, email: str):", "sqlalchemy.orm import Session, noload from sqlalchemy.future import select from sqlalchemy import update from", "return q.scalars().first() async def create_user(self, user: schemas.UserCreate): new_user = models.User( email=user.email, firstname=user.firstname, secondname=user.secondname,", "db_session: Session): self.db_session = db_session async def get_user_by_email(self, email: str): stmt = select(models.User).\\", "models, schemas class UserDAL(): def __init__(self, db_session: Session): self.db_session = db_session async def", "db_session async def get_user_by_email(self, email: str): stmt = select(models.User).\\ where(models.User.email == email).\\ options(noload('*'))", "async def create_user(self, user: schemas.UserCreate): new_user = models.User( email=user.email, firstname=user.firstname, secondname=user.secondname, ) new_user.set_password(<PASSWORD>)", "from sqlalchemy import update from uuid import UUID from . import models, schemas", "user_uuid: UUID): stmt = select(models.User).\\ where(models.User.uuid == user_uuid).\\ options(noload('*')) q = await self.db_session.execute(stmt)", "Session): self.db_session = db_session async def get_user_by_email(self, email: str): stmt = select(models.User).\\ where(models.User.email", "import models, schemas class UserDAL(): def __init__(self, db_session: Session): self.db_session = db_session async", "from uuid import UUID from . import models, schemas class UserDAL(): def __init__(self,", "def get_user_by_email(self, email: str): stmt = select(models.User).\\ where(models.User.email == email).\\ options(noload('*')) q =", "schemas class UserDAL(): def __init__(self, db_session: Session): self.db_session = db_session async def get_user_by_email(self,", "stmt = select(models.User).\\ where(models.User.email == email).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first()", "== email).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def get_user_by_uuid(self, user_uuid:", "stmt = select(models.User).\\ where(models.User.uuid == user_uuid).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first()", "update from uuid import UUID from . import models, schemas class UserDAL(): def", "__init__(self, db_session: Session): self.db_session = db_session async def get_user_by_email(self, email: str): stmt =", "sqlalchemy import update from uuid import UUID from . 
import models, schemas class", "email).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def get_user_by_uuid(self, user_uuid: UUID):", "select(models.User).\\ where(models.User.uuid == user_uuid).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def", "from sqlalchemy.orm import Session, noload from sqlalchemy.future import select from sqlalchemy import update", "options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def get_user_by_uuid(self, user_uuid: UUID): stmt", "class UserDAL(): def __init__(self, db_session: Session): self.db_session = db_session async def get_user_by_email(self, email:", "get_user_by_email(self, email: str): stmt = select(models.User).\\ where(models.User.email == email).\\ options(noload('*')) q = await", "await self.db_session.execute(stmt) return q.scalars().first() async def get_user_by_uuid(self, user_uuid: UUID): stmt = select(models.User).\\ where(models.User.uuid", "= await self.db_session.execute(stmt) return q.scalars().first() async def create_user(self, user: schemas.UserCreate): new_user = models.User(", "= await self.db_session.execute(stmt) return q.scalars().first() async def get_user_by_uuid(self, user_uuid: UUID): stmt = select(models.User).\\", "user_uuid).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def create_user(self, user: schemas.UserCreate):", ". import models, schemas class UserDAL(): def __init__(self, db_session: Session): self.db_session = db_session", "schemas.UserCreate): new_user = models.User( email=user.email, firstname=user.firstname, secondname=user.secondname, ) new_user.set_password(<PASSWORD>) self.db_session.add(new_user) return await self.db_session.flush()", "q.scalars().first() async def create_user(self, user: schemas.UserCreate): new_user = models.User( email=user.email, firstname=user.firstname, secondname=user.secondname, )", "import UUID from . import models, schemas class UserDAL(): def __init__(self, db_session: Session):", "from sqlalchemy.future import select from sqlalchemy import update from uuid import UUID from", "async def get_user_by_email(self, email: str): stmt = select(models.User).\\ where(models.User.email == email).\\ options(noload('*')) q", "= select(models.User).\\ where(models.User.uuid == user_uuid).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async", "select from sqlalchemy import update from uuid import UUID from . 
import models,", "select(models.User).\\ where(models.User.email == email).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def", "async def get_user_by_uuid(self, user_uuid: UUID): stmt = select(models.User).\\ where(models.User.uuid == user_uuid).\\ options(noload('*')) q", "get_user_by_uuid(self, user_uuid: UUID): stmt = select(models.User).\\ where(models.User.uuid == user_uuid).\\ options(noload('*')) q = await", "q = await self.db_session.execute(stmt) return q.scalars().first() async def create_user(self, user: schemas.UserCreate): new_user =", "= db_session async def get_user_by_email(self, email: str): stmt = select(models.User).\\ where(models.User.email == email).\\", "Session, noload from sqlalchemy.future import select from sqlalchemy import update from uuid import", "def get_user_by_uuid(self, user_uuid: UUID): stmt = select(models.User).\\ where(models.User.uuid == user_uuid).\\ options(noload('*')) q =", "await self.db_session.execute(stmt) return q.scalars().first() async def create_user(self, user: schemas.UserCreate): new_user = models.User( email=user.email,", "def create_user(self, user: schemas.UserCreate): new_user = models.User( email=user.email, firstname=user.firstname, secondname=user.secondname, ) new_user.set_password(<PASSWORD>) self.db_session.add(new_user)", "q = await self.db_session.execute(stmt) return q.scalars().first() async def get_user_by_uuid(self, user_uuid: UUID): stmt =", "where(models.User.uuid == user_uuid).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def create_user(self,", "== user_uuid).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def create_user(self, user:", "create_user(self, user: schemas.UserCreate): new_user = models.User( email=user.email, firstname=user.firstname, secondname=user.secondname, ) new_user.set_password(<PASSWORD>) self.db_session.add(new_user) return", "self.db_session.execute(stmt) return q.scalars().first() async def get_user_by_uuid(self, user_uuid: UUID): stmt = select(models.User).\\ where(models.User.uuid ==", "return q.scalars().first() async def get_user_by_uuid(self, user_uuid: UUID): stmt = select(models.User).\\ where(models.User.uuid == user_uuid).\\", "noload from sqlalchemy.future import select from sqlalchemy import update from uuid import UUID", "from . import models, schemas class UserDAL(): def __init__(self, db_session: Session): self.db_session =", "def __init__(self, db_session: Session): self.db_session = db_session async def get_user_by_email(self, email: str): stmt", "import update from uuid import UUID from . import models, schemas class UserDAL():", "sqlalchemy.future import select from sqlalchemy import update from uuid import UUID from .", "q.scalars().first() async def get_user_by_uuid(self, user_uuid: UUID): stmt = select(models.User).\\ where(models.User.uuid == user_uuid).\\ options(noload('*'))", "where(models.User.email == email).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def get_user_by_uuid(self,", "import Session, noload from sqlalchemy.future import select from sqlalchemy import update from uuid", "import select from sqlalchemy import update from uuid import UUID from . 
import", "UUID): stmt = select(models.User).\\ where(models.User.uuid == user_uuid).\\ options(noload('*')) q = await self.db_session.execute(stmt) return", "self.db_session.execute(stmt) return q.scalars().first() async def create_user(self, user: schemas.UserCreate): new_user = models.User( email=user.email, firstname=user.firstname,", "str): stmt = select(models.User).\\ where(models.User.email == email).\\ options(noload('*')) q = await self.db_session.execute(stmt) return", "= select(models.User).\\ where(models.User.email == email).\\ options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async", "UUID from . import models, schemas class UserDAL(): def __init__(self, db_session: Session): self.db_session", "user: schemas.UserCreate): new_user = models.User( email=user.email, firstname=user.firstname, secondname=user.secondname, ) new_user.set_password(<PASSWORD>) self.db_session.add(new_user) return await", "email: str): stmt = select(models.User).\\ where(models.User.email == email).\\ options(noload('*')) q = await self.db_session.execute(stmt)", "self.db_session = db_session async def get_user_by_email(self, email: str): stmt = select(models.User).\\ where(models.User.email ==", "options(noload('*')) q = await self.db_session.execute(stmt) return q.scalars().first() async def create_user(self, user: schemas.UserCreate): new_user", "uuid import UUID from . import models, schemas class UserDAL(): def __init__(self, db_session:" ]
[ "r\"fortran\", r\"for\", r\"goto\", r\"ifdef\", r\"ifndef\", r\"if\", r\"include\", r\"inline\", r\"int\", r\"line\", r\"long\", r\"pragma\", r\"register\",", "is not closed _COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") + \")\" _REGEX_COMMENT = _COMMENT", "= r\"[^0\\D][\\d]*\" + _INT_SUFFIX _NUMBER_SCI = r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\" _NUMBER_FLO1 = r\"[\\d]+[\\.]?[\\d]*\" _NUMBER_FLO2 = r\"[\\.][\\d]+\"", "= _NUMBER_FLO1 + \"|\" + _NUMBER_FLO2 _REGEX_NUMBER = \"|\".join( [_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT,", "+ _NUMBER_FLO2 _REGEX_NUMBER = \"|\".join( [_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO] ) NUMBER", "r\"float\", r\"fortran\", r\"for\", r\"goto\", r\"ifdef\", r\"ifndef\", r\"if\", r\"include\", r\"inline\", r\"int\", r\"line\", r\"long\", r\"pragma\",", "catastrofic backtracking if a string is not closed _STR_HEADERS = [\"L\", \"u8\", \"u\",", "= r\"(?P<COMMENT>\" + _REGEX_COMMENT + \")\" # string regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" #", "= [\"L\", \"u8\", \"u\", \"U\"] _STRING_PREFIX = r\"(\" + \"|\".join(_STR_HEADERS) + \")?\" _REGEX_STRING", "[ r\"_Alignas\", r\"_Alignof\", r\"_Atomic\", r\"_Bool\", r\"_Complex\", r\"_Decimal128\", r\"_Decimal32\", r\"_Decimal64\", r\"_Generic\", r\"_Imaginary\", r\"_Noreturn\", r\"_Static_assert\",", "r\"if\", r\"include\", r\"inline\", r\"int\", r\"line\", r\"long\", r\"pragma\", r\"register\", r\"restrict\", r\"return\", r\"short\", r\"signed\", r\"sizeof\",", "r\"_Bool\", r\"_Complex\", r\"_Decimal128\", r\"_Decimal32\", r\"_Decimal64\", r\"_Generic\", r\"_Imaginary\", r\"_Noreturn\", r\"_Static_assert\", r\"_Thread_local\", r\"asm\", r\"auto\", r\"break\",", "+ \")\" # string regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a", "FULL_CREGEX def get_clean_indent_regex(self): return RM_INDENT def get_remove_doublespaces_regex(self): return RM_MULTIPLE_SPACES def get_str_headers(self): return _STR_HEADERS", "_NUMBER_FLO2 = r\"[\\.][\\d]+\" _NUMBER_FLO = _NUMBER_FLO1 + \"|\" + _NUMBER_FLO2 _REGEX_NUMBER = \"|\".join(", "+ _INT_SUFFIX _NUMBER_SCI = r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\" _NUMBER_FLO1 = r\"[\\d]+[\\.]?[\\d]*\" _NUMBER_FLO2 = r\"[\\.][\\d]+\" _NUMBER_FLO =", "r\"(?P<OP>\" + _REGEX_OP + \")\" # numbers regex _INT_SUFFIX = r\"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?\" _NUMBER_OCT =", "[] _CHAR_PREFIX = \"\" # no char literals for C _REGEX_CHAR = (", "# catastrofic backtracking if a string is not closed _CHAR_HEADERS = [] _CHAR_PREFIX", "# numbers regex _INT_SUFFIX = r\"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?\" _NUMBER_OCT = r\"[0][0-7]+\" + _INT_SUFFIX _NUMBER_HEX =", "_INT_SUFFIX = r\"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?\" _NUMBER_OCT = r\"[0][0-7]+\" + _INT_SUFFIX _NUMBER_HEX = r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX", "backtracking if a string is not closed _COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") +", "+ _INT_SUFFIX _NUMBER_BIN = r\"[0][bB][0-1]+\" + _INT_SUFFIX _NUMBER_INT = r\"[^0\\D][\\d]*\" + _INT_SUFFIX _NUMBER_SCI", "not-[\\w] character (for example it match +,-,*,-,...) 
except spaces _REGEX_OP = r\"[^\\s\\w]\" OP", "RM_INDENT def get_remove_doublespaces_regex(self): return RM_MULTIPLE_SPACES def get_str_headers(self): return _STR_HEADERS def get_chr_headers(self): return _CHAR_HEADERS", "[_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO] ) NUMBER = r\"(?P<NUMBER>\" + _REGEX_NUMBER +", "r\"_Decimal128\", r\"_Decimal32\", r\"_Decimal64\", r\"_Generic\", r\"_Imaginary\", r\"_Noreturn\", r\"_Static_assert\", r\"_Thread_local\", r\"asm\", r\"auto\", r\"break\", r\"case\", r\"char\",", "+ r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") + \")\" ) CHAR = r\"(?P<CHAR>\" + _REGEX_CHAR +", "def get_full_regex(self): return FULL_CREGEX def get_clean_indent_regex(self): return RM_INDENT def get_remove_doublespaces_regex(self): return RM_MULTIPLE_SPACES def", "names _REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\" + _REGEX_NAME + \")\" # match", "not closed _STR_HEADERS = [\"L\", \"u8\", \"u\", \"U\"] _STRING_PREFIX = r\"(\" + \"|\".join(_STR_HEADERS)", "_STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed _STR_HEADERS", "+ _REGEX_OP + \")\" # numbers regex _INT_SUFFIX = r\"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?\" _NUMBER_OCT = r\"[0][0-7]+\"", "r\"continue\", r\"default\", r\"define\", r\"double\", r\"do\", r\"elif\", r\"else\", r\"endif\", r\"enum\", r\"error\", r\"extern\", r\"float\", r\"fortran\",", "r\"include\", r\"inline\", r\"int\", r\"line\", r\"long\", r\"pragma\", r\"register\", r\"restrict\", r\"return\", r\"short\", r\"signed\", r\"sizeof\", r\"static\",", "r\"return\", r\"short\", r\"signed\", r\"sizeof\", r\"static\", r\"struct\", r\"switch\", r\"typedef\", r\"undef\", r\"union\", r\"unsigned\", r\"void\", r\"volatile\",", "variable names _REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\" + _REGEX_NAME + \")\" #", "= r\"(?P<NAME>\" + _REGEX_NAME + \")\" # match every not-[\\w] character (for example", "r\"_Alignas\", r\"_Alignof\", r\"_Atomic\", r\"_Bool\", r\"_Complex\", r\"_Decimal128\", r\"_Decimal32\", r\"_Decimal64\", r\"_Generic\", r\"_Imaginary\", r\"_Noreturn\", r\"_Static_assert\", r\"_Thread_local\",", "_INT_SUFFIX _NUMBER_SCI = r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\" _NUMBER_FLO1 = r\"[\\d]+[\\.]?[\\d]*\" _NUMBER_FLO2 = r\"[\\.][\\d]+\" _NUMBER_FLO = _NUMBER_FLO1", "= r\"[\\d]+[\\.]?[\\d]*\" _NUMBER_FLO2 = r\"[\\.][\\d]+\" _NUMBER_FLO = _NUMBER_FLO1 + \"|\" + _NUMBER_FLO2 _REGEX_NUMBER", "_REGEX_COMMENT = _COMMENT + \"|\" + _COMMENT_MULTILINE COMMENT = r\"(?P<COMMENT>\" + _REGEX_COMMENT +", "# comments regex _COMMENT = r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if", "if a string is not closed _STR_HEADERS = [\"L\", \"u8\", \"u\", \"U\"] _STRING_PREFIX", "not closed _CHAR_HEADERS = [] _CHAR_PREFIX = \"\" # no char literals for", "CHAR = r\"(?P<CHAR>\" + _REGEX_CHAR + \")\" FULL_CREGEX = \"|\".join([COMMENT, STRING, CHAR, KEYWORD,", "_REGEX_CHAR + \")\" FULL_CREGEX = \"|\".join([COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME]) class", "= r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed _STR_HEADERS =", "NAME] def get_full_regex(self): return FULL_CREGEX def get_clean_indent_regex(self): return RM_INDENT def get_remove_doublespaces_regex(self): return RM_MULTIPLE_SPACES", "_REGEX_KEYWORD = \"|\".join([r\"{}(?!\\w)\".format(kw) for kw in LIST_KEYWORDS]) KEYWORD = \"(?P<KW>\" + _REGEX_KEYWORD +", "catastrofic backtracking if a string is not closed 
_COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\")", "character (for example it match +,-,*,-,...) except spaces _REGEX_OP = r\"[^\\s\\w]\" OP =", "\"|\".join([r\"{}(?!\\w)\".format(kw) for kw in LIST_KEYWORDS]) KEYWORD = \"(?P<KW>\" + _REGEX_KEYWORD + \")\" #", "string regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not", "\")\" ) CHAR = r\"(?P<CHAR>\" + _REGEX_CHAR + \")\" FULL_CREGEX = \"|\".join([COMMENT, STRING,", "def get_clean_indent_regex(self): return RM_INDENT def get_remove_doublespaces_regex(self): return RM_MULTIPLE_SPACES def get_str_headers(self): return _STR_HEADERS def", "r\"goto\", r\"ifdef\", r\"ifndef\", r\"if\", r\"include\", r\"inline\", r\"int\", r\"line\", r\"long\", r\"pragma\", r\"register\", r\"restrict\", r\"return\",", "= r\"[0][0-7]+\" + _INT_SUFFIX _NUMBER_HEX = r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX _NUMBER_BIN = r\"[0][bB][0-1]+\" +", "_REGEX_CHAR = ( _CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") + \")\" ) CHAR =", "= \"|\".join([COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME]) class CRegex: def __init__(self): self.regex_groups", "+ _REGEX_KEYWORD + \")\" # variable names _REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\"", "r\"[0][0-7]+\" + _INT_SUFFIX _NUMBER_HEX = r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX _NUMBER_BIN = r\"[0][bB][0-1]+\" + _INT_SUFFIX", "every not-[\\w] character (for example it match +,-,*,-,...) except spaces _REGEX_OP = r\"[^\\s\\w]\"", "is not closed _STR_HEADERS = [\"L\", \"u8\", \"u\", \"U\"] _STRING_PREFIX = r\"(\" +", "LIST_KEYWORDS]) KEYWORD = \"(?P<KW>\" + _REGEX_KEYWORD + \")\" # variable names _REGEX_NAME =", "r\"(?P<COMMENT>\" + _REGEX_COMMENT + \")\" # string regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic", "regex _CHAR_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed", "r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed _STR_HEADERS = [\"L\",", "= \"(?P<KW>\" + _REGEX_KEYWORD + \")\" # variable names _REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME", "\"(?P<KW>\" + _REGEX_KEYWORD + \")\" # variable names _REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME =", "\")\" # string regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string", "r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed", "regex _COMMENT = r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string", "it match +,-,*,-,...) 
except spaces _REGEX_OP = r\"[^\\s\\w]\" OP = r\"(?P<OP>\" + _REGEX_OP", ") NUMBER = r\"(?P<NUMBER>\" + _REGEX_NUMBER + \")\" # comments regex _COMMENT =", "+ _REGEX_COMMENT + \")\" # string regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking", "r\"struct\", r\"switch\", r\"typedef\", r\"undef\", r\"union\", r\"unsigned\", r\"void\", r\"volatile\", r\"while\", ] # cleaning regex", "_REGEX_NUMBER + \")\" # comments regex _COMMENT = r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" #", "+ \")\" FULL_CREGEX = \"|\".join([COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME]) class CRegex:", "+ _INT_SUFFIX _NUMBER_HEX = r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX _NUMBER_BIN = r\"[0][bB][0-1]+\" + _INT_SUFFIX _NUMBER_INT", "r\"(?P<NAME>\" + _REGEX_NAME + \")\" # match every not-[\\w] character (for example it", "+ \"|\" + _COMMENT_MULTILINE COMMENT = r\"(?P<COMMENT>\" + _REGEX_COMMENT + \")\" # string", "literals for C _REGEX_CHAR = ( _CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") + \")\"", "= r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed _CHAR_HEADERS =", "__init__(self): self.regex_groups = [COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME] def get_full_regex(self): return", "+ _INT_SUFFIX _NUMBER_INT = r\"[^0\\D][\\d]*\" + _INT_SUFFIX _NUMBER_SCI = r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\" _NUMBER_FLO1 = r\"[\\d]+[\\.]?[\\d]*\"", "r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") + \")\" _REGEX_COMMENT = _COMMENT + \"|\" + _COMMENT_MULTILINE COMMENT", "+ _CHAR_CB.format(\"CHR\") + \")\" ) CHAR = r\"(?P<CHAR>\" + _REGEX_CHAR + \")\" FULL_CREGEX", "backtracking if a string is not closed _STR_HEADERS = [\"L\", \"u8\", \"u\", \"U\"]", "backtracking if a string is not closed _CHAR_HEADERS = [] _CHAR_PREFIX = \"\"", "if a string is not closed _CHAR_HEADERS = [] _CHAR_PREFIX = \"\" #", "= r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\" + _REGEX_NAME + \")\" # match every not-[\\w]", "= \"|\".join( [_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO] ) NUMBER = r\"(?P<NUMBER>\" +", "r\"typedef\", r\"undef\", r\"union\", r\"unsigned\", r\"void\", r\"volatile\", r\"while\", ] # cleaning regex RM_INDENT =", "catastrofic backtracking if a string is not closed _CHAR_HEADERS = [] _CHAR_PREFIX =", "\")\" # match every not-[\\w] character (for example it match +,-,*,-,...) 
except spaces", "\")?\" _REGEX_STRING = ( _STRING_PREFIX + r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' + _STRING_CB.format(\"STR\") + \")\" ) STRING", "r\"int\", r\"line\", r\"long\", r\"pragma\", r\"register\", r\"restrict\", r\"return\", r\"short\", r\"signed\", r\"sizeof\", r\"static\", r\"struct\", r\"switch\",", "= r\"(?P<CHAR>\" + _REGEX_CHAR + \")\" FULL_CREGEX = \"|\".join([COMMENT, STRING, CHAR, KEYWORD, NUMBER,", "+ \"|\" + _NUMBER_FLO2 _REGEX_NUMBER = \"|\".join( [_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO]", "_REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\" + _REGEX_NAME + \")\" # match every", "r\"else\", r\"endif\", r\"enum\", r\"error\", r\"extern\", r\"float\", r\"fortran\", r\"for\", r\"goto\", r\"ifdef\", r\"ifndef\", r\"if\", r\"include\",", "r\"elif\", r\"else\", r\"endif\", r\"enum\", r\"error\", r\"extern\", r\"float\", r\"fortran\", r\"for\", r\"goto\", r\"ifdef\", r\"ifndef\", r\"if\",", "r\"(\" + \"|\".join(_STR_HEADERS) + \")?\" _REGEX_STRING = ( _STRING_PREFIX + r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' + _STRING_CB.format(\"STR\")", "closed _COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") + \")\" _REGEX_COMMENT = _COMMENT + \"|\"", "OP = r\"(?P<OP>\" + _REGEX_OP + \")\" # numbers regex _INT_SUFFIX = r\"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?\"", "_REGEX_STRING + \")\" # char regex _CHAR_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if", "string is not closed _STR_HEADERS = [\"L\", \"u8\", \"u\", \"U\"] _STRING_PREFIX = r\"(\"", "# char regex _CHAR_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is", "+ \")\" _REGEX_COMMENT = _COMMENT + \"|\" + _COMMENT_MULTILINE COMMENT = r\"(?P<COMMENT>\" +", "kw in LIST_KEYWORDS]) KEYWORD = \"(?P<KW>\" + _REGEX_KEYWORD + \")\" # variable names", "\")\" _REGEX_COMMENT = _COMMENT + \"|\" + _COMMENT_MULTILINE COMMENT = r\"(?P<COMMENT>\" + _REGEX_COMMENT", "+ \")\" # variable names _REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\" + _REGEX_NAME", "_NUMBER_INT = r\"[^0\\D][\\d]*\" + _INT_SUFFIX _NUMBER_SCI = r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\" _NUMBER_FLO1 = r\"[\\d]+[\\.]?[\\d]*\" _NUMBER_FLO2 =", "( _STRING_PREFIX + r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' + _STRING_CB.format(\"STR\") + \")\" ) STRING = r\"(?P<STRING>\" +", "r\"short\", r\"signed\", r\"sizeof\", r\"static\", r\"struct\", r\"switch\", r\"typedef\", r\"undef\", r\"union\", r\"unsigned\", r\"void\", r\"volatile\", r\"while\",", "STRING, CHAR, KEYWORD, NUMBER, OP, NAME]) class CRegex: def __init__(self): self.regex_groups = [COMMENT,", "# variable names _REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\" + _REGEX_NAME + \")\"", "if a string is not closed _COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") + \")\"", "_STR_HEADERS = [\"L\", \"u8\", \"u\", \"U\"] _STRING_PREFIX = r\"(\" + \"|\".join(_STR_HEADERS) + \")?\"", "LIST_KEYWORDS = [ r\"_Alignas\", r\"_Alignof\", r\"_Atomic\", r\"_Bool\", r\"_Complex\", r\"_Decimal128\", r\"_Decimal32\", r\"_Decimal64\", r\"_Generic\", r\"_Imaginary\",", "r\"endif\", r\"enum\", r\"error\", r\"extern\", r\"float\", r\"fortran\", r\"for\", r\"goto\", r\"ifdef\", r\"ifndef\", r\"if\", r\"include\", r\"inline\",", "no char literals for C _REGEX_CHAR = ( _CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\")", "for C _REGEX_CHAR = ( _CHAR_PREFIX + 
r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") + \")\" )", "r\"unsigned\", r\"void\", r\"volatile\", r\"while\", ] # cleaning regex RM_INDENT = r\"(//[^\\n]*\\n)|(\\s*^\\s*)\" RM_MULTIPLE_SPACES =", "CRegex: def __init__(self): self.regex_groups = [COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME] def", "+ \")\" ) STRING = r\"(?P<STRING>\" + _REGEX_STRING + \")\" # char regex", "= [COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME] def get_full_regex(self): return FULL_CREGEX def", "r\"[^\\s\\w]\" OP = r\"(?P<OP>\" + _REGEX_OP + \")\" # numbers regex _INT_SUFFIX =", "r\"_Complex\", r\"_Decimal128\", r\"_Decimal32\", r\"_Decimal64\", r\"_Generic\", r\"_Imaginary\", r\"_Noreturn\", r\"_Static_assert\", r\"_Thread_local\", r\"asm\", r\"auto\", r\"break\", r\"case\",", "get_clean_indent_regex(self): return RM_INDENT def get_remove_doublespaces_regex(self): return RM_MULTIPLE_SPACES def get_str_headers(self): return _STR_HEADERS def get_chr_headers(self):", "_COMMENT_MULTILINE COMMENT = r\"(?P<COMMENT>\" + _REGEX_COMMENT + \")\" # string regex _STRING_CB =", "r\"(//[^\\n]*\\n)|(\\s*^\\s*)\" RM_MULTIPLE_SPACES = r\"[^\\n\\S]+\" # Language keywords _REGEX_KEYWORD = \"|\".join([r\"{}(?!\\w)\".format(kw) for kw in", "r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\" _NUMBER_FLO1 = r\"[\\d]+[\\.]?[\\d]*\" _NUMBER_FLO2 = r\"[\\.][\\d]+\" _NUMBER_FLO = _NUMBER_FLO1 + \"|\" +", "_STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed _COMMENT_MULTILINE", "_REGEX_KEYWORD + \")\" # variable names _REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\" +", "\"|\" + _COMMENT_MULTILINE COMMENT = r\"(?P<COMMENT>\" + _REGEX_COMMENT + \")\" # string regex", "# string regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is", "OP, NAME] def get_full_regex(self): return FULL_CREGEX def get_clean_indent_regex(self): return RM_INDENT def get_remove_doublespaces_regex(self): return", "r\"enum\", r\"error\", r\"extern\", r\"float\", r\"fortran\", r\"for\", r\"goto\", r\"ifdef\", r\"ifndef\", r\"if\", r\"include\", r\"inline\", r\"int\",", "STRING = r\"(?P<STRING>\" + _REGEX_STRING + \")\" # char regex _CHAR_CB = r\"(?P<ERROR_{}>\\Z)\"", "r\"inline\", r\"int\", r\"line\", r\"long\", r\"pragma\", r\"register\", r\"restrict\", r\"return\", r\"short\", r\"signed\", r\"sizeof\", r\"static\", r\"struct\",", "_NUMBER_FLO1 = r\"[\\d]+[\\.]?[\\d]*\" _NUMBER_FLO2 = r\"[\\.][\\d]+\" _NUMBER_FLO = _NUMBER_FLO1 + \"|\" + _NUMBER_FLO2", "r\"const\", r\"continue\", r\"default\", r\"define\", r\"double\", r\"do\", r\"elif\", r\"else\", r\"endif\", r\"enum\", r\"error\", r\"extern\", r\"float\",", "= r\"(//[^\\n]*\\n)|(\\s*^\\s*)\" RM_MULTIPLE_SPACES = r\"[^\\n\\S]+\" # Language keywords _REGEX_KEYWORD = \"|\".join([r\"{}(?!\\w)\".format(kw) for kw", "r\"_Decimal32\", r\"_Decimal64\", r\"_Generic\", r\"_Imaginary\", r\"_Noreturn\", r\"_Static_assert\", r\"_Thread_local\", r\"asm\", r\"auto\", r\"break\", r\"case\", r\"char\", r\"const\",", "r\"error\", r\"extern\", r\"float\", r\"fortran\", r\"for\", r\"goto\", r\"ifdef\", r\"ifndef\", r\"if\", r\"include\", r\"inline\", r\"int\", r\"line\",", "r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") + \")\" ) CHAR = r\"(?P<CHAR>\" + _REGEX_CHAR + \")\"", "+ \")?\" _REGEX_STRING = ( _STRING_PREFIX + r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' + _STRING_CB.format(\"STR\") + \")\" )", "is not closed _CHAR_HEADERS = 
[] _CHAR_PREFIX = \"\" # no char literals", "r\"(?P<CHAR>\" + _REGEX_CHAR + \")\" FULL_CREGEX = \"|\".join([COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP,", "_NUMBER_HEX = r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX _NUMBER_BIN = r\"[0][bB][0-1]+\" + _INT_SUFFIX _NUMBER_INT = r\"[^0\\D][\\d]*\"", "_REGEX_NAME + \")\" # match every not-[\\w] character (for example it match +,-,*,-,...)", "_NUMBER_FLO = _NUMBER_FLO1 + \"|\" + _NUMBER_FLO2 _REGEX_NUMBER = \"|\".join( [_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX,", "class CRegex: def __init__(self): self.regex_groups = [COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME]", "\")\" # numbers regex _INT_SUFFIX = r\"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?\" _NUMBER_OCT = r\"[0][0-7]+\" + _INT_SUFFIX _NUMBER_HEX", "+ r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' + _STRING_CB.format(\"STR\") + \")\" ) STRING = r\"(?P<STRING>\" + _REGEX_STRING +", "= r\"[^\\n\\S]+\" # Language keywords _REGEX_KEYWORD = \"|\".join([r\"{}(?!\\w)\".format(kw) for kw in LIST_KEYWORDS]) KEYWORD", "comments regex _COMMENT = r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a", "KEYWORD = \"(?P<KW>\" + _REGEX_KEYWORD + \")\" # variable names _REGEX_NAME = r\"[^\\d\\W]\\w*\"", "_NUMBER_OCT = r\"[0][0-7]+\" + _INT_SUFFIX _NUMBER_HEX = r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX _NUMBER_BIN = r\"[0][bB][0-1]+\"", "\")\" # comments regex _COMMENT = r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking", "KEYWORD, NUMBER, OP, NAME] def get_full_regex(self): return FULL_CREGEX def get_clean_indent_regex(self): return RM_INDENT def", "C _REGEX_CHAR = ( _CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") + \")\" ) CHAR", "r\"case\", r\"char\", r\"const\", r\"continue\", r\"default\", r\"define\", r\"double\", r\"do\", r\"elif\", r\"else\", r\"endif\", r\"enum\", r\"error\",", "r\"void\", r\"volatile\", r\"while\", ] # cleaning regex RM_INDENT = r\"(//[^\\n]*\\n)|(\\s*^\\s*)\" RM_MULTIPLE_SPACES = r\"[^\\n\\S]+\"", "not closed _COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") + \")\" _REGEX_COMMENT = _COMMENT +", "\"|\" + _NUMBER_FLO2 _REGEX_NUMBER = \"|\".join( [_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO] )", "+ \")\" # char regex _CHAR_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a", "+ _REGEX_STRING + \")\" # char regex _CHAR_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking", "+ \")\" # match every not-[\\w] character (for example it match +,-,*,-,...) except", "spaces _REGEX_OP = r\"[^\\s\\w]\" OP = r\"(?P<OP>\" + _REGEX_OP + \")\" # numbers", "= r\"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?\" _NUMBER_OCT = r\"[0][0-7]+\" + _INT_SUFFIX _NUMBER_HEX = r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX _NUMBER_BIN", "(for example it match +,-,*,-,...) 
except spaces _REGEX_OP = r\"[^\\s\\w]\" OP = r\"(?P<OP>\"", "string is not closed _COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") + \")\" _REGEX_COMMENT =", "+ _STRING_CB.format(\"COMM_M\") + \")\" _REGEX_COMMENT = _COMMENT + \"|\" + _COMMENT_MULTILINE COMMENT =", "= r\"(?P<STRING>\" + _REGEX_STRING + \")\" # char regex _CHAR_CB = r\"(?P<ERROR_{}>\\Z)\" #", "r\"ifndef\", r\"if\", r\"include\", r\"inline\", r\"int\", r\"line\", r\"long\", r\"pragma\", r\"register\", r\"restrict\", r\"return\", r\"short\", r\"signed\",", "= r\"(\" + \"|\".join(_STR_HEADERS) + \")?\" _REGEX_STRING = ( _STRING_PREFIX + r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' +", "r\"do\", r\"elif\", r\"else\", r\"endif\", r\"enum\", r\"error\", r\"extern\", r\"float\", r\"fortran\", r\"for\", r\"goto\", r\"ifdef\", r\"ifndef\",", "char literals for C _REGEX_CHAR = ( _CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") +", "a string is not closed _CHAR_HEADERS = [] _CHAR_PREFIX = \"\" # no", "= r\"(?P<OP>\" + _REGEX_OP + \")\" # numbers regex _INT_SUFFIX = r\"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?\" _NUMBER_OCT", "_COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") + \")\" _REGEX_COMMENT = _COMMENT + \"|\" +", "_CHAR_PREFIX = \"\" # no char literals for C _REGEX_CHAR = ( _CHAR_PREFIX", "= r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed _COMMENT_MULTILINE =", "_NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO] ) NUMBER = r\"(?P<NUMBER>\" + _REGEX_NUMBER + \")\" # comments", "\")\" # variable names _REGEX_NAME = r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\" + _REGEX_NAME +", "# no char literals for C _REGEX_CHAR = ( _CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" +", "[\"L\", \"u8\", \"u\", \"U\"] _STRING_PREFIX = r\"(\" + \"|\".join(_STR_HEADERS) + \")?\" _REGEX_STRING =", "# cleaning regex RM_INDENT = r\"(//[^\\n]*\\n)|(\\s*^\\s*)\" RM_MULTIPLE_SPACES = r\"[^\\n\\S]+\" # Language keywords _REGEX_KEYWORD", "r\"[^0\\D][\\d]*\" + _INT_SUFFIX _NUMBER_SCI = r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\" _NUMBER_FLO1 = r\"[\\d]+[\\.]?[\\d]*\" _NUMBER_FLO2 = r\"[\\.][\\d]+\" _NUMBER_FLO", "_CHAR_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed _CHAR_HEADERS", "\"|\".join(_STR_HEADERS) + \")?\" _REGEX_STRING = ( _STRING_PREFIX + r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' + _STRING_CB.format(\"STR\") + \")\"", "get_full_regex(self): return FULL_CREGEX def get_clean_indent_regex(self): return RM_INDENT def get_remove_doublespaces_regex(self): return RM_MULTIPLE_SPACES def get_str_headers(self):", "NUMBER, OP, NAME] def get_full_regex(self): return FULL_CREGEX def get_clean_indent_regex(self): return RM_INDENT def get_remove_doublespaces_regex(self):", "r\"_Thread_local\", r\"asm\", r\"auto\", r\"break\", r\"case\", r\"char\", r\"const\", r\"continue\", r\"default\", r\"define\", r\"double\", r\"do\", r\"elif\",", "closed _CHAR_HEADERS = [] _CHAR_PREFIX = \"\" # no char literals for C", "r\"_Decimal64\", r\"_Generic\", r\"_Imaginary\", r\"_Noreturn\", r\"_Static_assert\", r\"_Thread_local\", r\"asm\", r\"auto\", r\"break\", r\"case\", r\"char\", r\"const\", r\"continue\",", "r\"_Atomic\", r\"_Bool\", r\"_Complex\", r\"_Decimal128\", r\"_Decimal32\", r\"_Decimal64\", r\"_Generic\", r\"_Imaginary\", r\"_Noreturn\", r\"_Static_assert\", r\"_Thread_local\", r\"asm\", r\"auto\",", "\")\" ) STRING = 
r\"(?P<STRING>\" + _REGEX_STRING + \")\" # char regex _CHAR_CB", "NUMBER, OP, NAME]) class CRegex: def __init__(self): self.regex_groups = [COMMENT, STRING, CHAR, KEYWORD,", "r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' + _STRING_CB.format(\"STR\") + \")\" ) STRING = r\"(?P<STRING>\" + _REGEX_STRING + \")\"", "r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed _CHAR_HEADERS = []", "a string is not closed _STR_HEADERS = [\"L\", \"u8\", \"u\", \"U\"] _STRING_PREFIX =", "r\"volatile\", r\"while\", ] # cleaning regex RM_INDENT = r\"(//[^\\n]*\\n)|(\\s*^\\s*)\" RM_MULTIPLE_SPACES = r\"[^\\n\\S]+\" #", "\")\" # char regex _CHAR_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string", "closed _STR_HEADERS = [\"L\", \"u8\", \"u\", \"U\"] _STRING_PREFIX = r\"(\" + \"|\".join(_STR_HEADERS) +", "_NUMBER_SCI = r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\" _NUMBER_FLO1 = r\"[\\d]+[\\.]?[\\d]*\" _NUMBER_FLO2 = r\"[\\.][\\d]+\" _NUMBER_FLO = _NUMBER_FLO1 +", "r\"register\", r\"restrict\", r\"return\", r\"short\", r\"signed\", r\"sizeof\", r\"static\", r\"struct\", r\"switch\", r\"typedef\", r\"undef\", r\"union\", r\"unsigned\",", "_INT_SUFFIX _NUMBER_BIN = r\"[0][bB][0-1]+\" + _INT_SUFFIX _NUMBER_INT = r\"[^0\\D][\\d]*\" + _INT_SUFFIX _NUMBER_SCI =", "+ _REGEX_NUMBER + \")\" # comments regex _COMMENT = r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\"", "r\"default\", r\"define\", r\"double\", r\"do\", r\"elif\", r\"else\", r\"endif\", r\"enum\", r\"error\", r\"extern\", r\"float\", r\"fortran\", r\"for\",", "_NUMBER_FLO2 _REGEX_NUMBER = \"|\".join( [_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO] ) NUMBER =", "r\"_Static_assert\", r\"_Thread_local\", r\"asm\", r\"auto\", r\"break\", r\"case\", r\"char\", r\"const\", r\"continue\", r\"default\", r\"define\", r\"double\", r\"do\",", "OP, NAME]) class CRegex: def __init__(self): self.regex_groups = [COMMENT, STRING, CHAR, KEYWORD, NUMBER,", "r\"[0][bB][0-1]+\" + _INT_SUFFIX _NUMBER_INT = r\"[^0\\D][\\d]*\" + _INT_SUFFIX _NUMBER_SCI = r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\" _NUMBER_FLO1 =", "_REGEX_COMMENT + \")\" # string regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if", "+ \")\" ) CHAR = r\"(?P<CHAR>\" + _REGEX_CHAR + \")\" FULL_CREGEX = \"|\".join([COMMENT,", "CHAR, KEYWORD, NUMBER, OP, NAME] def get_full_regex(self): return FULL_CREGEX def get_clean_indent_regex(self): return RM_INDENT", "r\"(?P<NUMBER>\" + _REGEX_NUMBER + \")\" # comments regex _COMMENT = r\"//[^\\n]*\" _STRING_CB =", "r\"sizeof\", r\"static\", r\"struct\", r\"switch\", r\"typedef\", r\"undef\", r\"union\", r\"unsigned\", r\"void\", r\"volatile\", r\"while\", ] #", "= ( _STRING_PREFIX + r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' + _STRING_CB.format(\"STR\") + \")\" ) STRING = r\"(?P<STRING>\"", "in LIST_KEYWORDS]) KEYWORD = \"(?P<KW>\" + _REGEX_KEYWORD + \")\" # variable names _REGEX_NAME", "r\"switch\", r\"typedef\", r\"undef\", r\"union\", r\"unsigned\", r\"void\", r\"volatile\", r\"while\", ] # cleaning regex RM_INDENT", "_COMMENT = r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is", "return FULL_CREGEX def get_clean_indent_regex(self): return RM_INDENT def get_remove_doublespaces_regex(self): return RM_MULTIPLE_SPACES def get_str_headers(self): return", "\"\" # no char literals for C _REGEX_CHAR = ( _CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\"", "string is not closed _CHAR_HEADERS = [] 
_CHAR_PREFIX = \"\" # no char", "_NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO] ) NUMBER = r\"(?P<NUMBER>\" + _REGEX_NUMBER + \")\" #", "\"|\".join( [_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO] ) NUMBER = r\"(?P<NUMBER>\" + _REGEX_NUMBER", "regex RM_INDENT = r\"(//[^\\n]*\\n)|(\\s*^\\s*)\" RM_MULTIPLE_SPACES = r\"[^\\n\\S]+\" # Language keywords _REGEX_KEYWORD = \"|\".join([r\"{}(?!\\w)\".format(kw)", "r\"undef\", r\"union\", r\"unsigned\", r\"void\", r\"volatile\", r\"while\", ] # cleaning regex RM_INDENT = r\"(//[^\\n]*\\n)|(\\s*^\\s*)\"", "_STRING_PREFIX = r\"(\" + \"|\".join(_STR_HEADERS) + \")?\" _REGEX_STRING = ( _STRING_PREFIX + r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|'", "+ _REGEX_CHAR + \")\" FULL_CREGEX = \"|\".join([COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME])", "r\"define\", r\"double\", r\"do\", r\"elif\", r\"else\", r\"endif\", r\"enum\", r\"error\", r\"extern\", r\"float\", r\"fortran\", r\"for\", r\"goto\",", "except spaces _REGEX_OP = r\"[^\\s\\w]\" OP = r\"(?P<OP>\" + _REGEX_OP + \")\" #", "# match every not-[\\w] character (for example it match +,-,*,-,...) except spaces _REGEX_OP", "+ _REGEX_NAME + \")\" # match every not-[\\w] character (for example it match", "r\"extern\", r\"float\", r\"fortran\", r\"for\", r\"goto\", r\"ifdef\", r\"ifndef\", r\"if\", r\"include\", r\"inline\", r\"int\", r\"line\", r\"long\",", "COMMENT = r\"(?P<COMMENT>\" + _REGEX_COMMENT + \")\" # string regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\"", "_CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") + \")\" ) CHAR = r\"(?P<CHAR>\" + _REGEX_CHAR", "= r\"(?P<NUMBER>\" + _REGEX_NUMBER + \")\" # comments regex _COMMENT = r\"//[^\\n]*\" _STRING_CB", "match +,-,*,-,...) except spaces _REGEX_OP = r\"[^\\s\\w]\" OP = r\"(?P<OP>\" + _REGEX_OP +", "r\"pragma\", r\"register\", r\"restrict\", r\"return\", r\"short\", r\"signed\", r\"sizeof\", r\"static\", r\"struct\", r\"switch\", r\"typedef\", r\"undef\", r\"union\",", "_NUMBER_BIN = r\"[0][bB][0-1]+\" + _INT_SUFFIX _NUMBER_INT = r\"[^0\\D][\\d]*\" + _INT_SUFFIX _NUMBER_SCI = r\"[\\d]+[\\.]?[\\d]*[eE][+-]?[\\d]+\"", "NAME = r\"(?P<NAME>\" + _REGEX_NAME + \")\" # match every not-[\\w] character (for", "\"U\"] _STRING_PREFIX = r\"(\" + \"|\".join(_STR_HEADERS) + \")?\" _REGEX_STRING = ( _STRING_PREFIX +", "+,-,*,-,...) 
except spaces _REGEX_OP = r\"[^\\s\\w]\" OP = r\"(?P<OP>\" + _REGEX_OP + \")\"", "r\"[^\\n\\S]+\" # Language keywords _REGEX_KEYWORD = \"|\".join([r\"{}(?!\\w)\".format(kw) for kw in LIST_KEYWORDS]) KEYWORD =", "_NUMBER_SCI, _NUMBER_FLO] ) NUMBER = r\"(?P<NUMBER>\" + _REGEX_NUMBER + \")\" # comments regex", "_CHAR_CB.format(\"CHR\") + \")\" ) CHAR = r\"(?P<CHAR>\" + _REGEX_CHAR + \")\" FULL_CREGEX =", "+ _STRING_CB.format(\"STR\") + \")\" ) STRING = r\"(?P<STRING>\" + _REGEX_STRING + \")\" #", "= r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX _NUMBER_BIN = r\"[0][bB][0-1]+\" + _INT_SUFFIX _NUMBER_INT = r\"[^0\\D][\\d]*\" +", "regex _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed", "= r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") + \")\" _REGEX_COMMENT = _COMMENT + \"|\" + _COMMENT_MULTILINE", "r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not closed _COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\"", "( _CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") + \")\" ) CHAR = r\"(?P<CHAR>\" +", "a string is not closed _COMMENT_MULTILINE = r\"/\\*(.|[\\r\\n])*?(\\*/|\" + _STRING_CB.format(\"COMM_M\") + \")\" _REGEX_COMMENT", "return RM_INDENT def get_remove_doublespaces_regex(self): return RM_MULTIPLE_SPACES def get_str_headers(self): return _STR_HEADERS def get_chr_headers(self): return", "= \"|\".join([r\"{}(?!\\w)\".format(kw) for kw in LIST_KEYWORDS]) KEYWORD = \"(?P<KW>\" + _REGEX_KEYWORD + \")\"", "+ _COMMENT_MULTILINE COMMENT = r\"(?P<COMMENT>\" + _REGEX_COMMENT + \")\" # string regex _STRING_CB", "= ( _CHAR_PREFIX + r\"'(\\\\(\\\\|'|\\\"|\\?|a|b|f|n|r|t|v|[0-9]{1,3}|x[a-fA-F0-9]+)|\\s|\\w){0,1}('|\" + _CHAR_CB.format(\"CHR\") + \")\" ) CHAR = r\"(?P<CHAR>\"", "r\"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?\" _NUMBER_OCT = r\"[0][0-7]+\" + _INT_SUFFIX _NUMBER_HEX = r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX _NUMBER_BIN =", "r\"[^\\d\\W]\\w*\" NAME = r\"(?P<NAME>\" + _REGEX_NAME + \")\" # match every not-[\\w] character", "= [] _CHAR_PREFIX = \"\" # no char literals for C _REGEX_CHAR =", ") STRING = r\"(?P<STRING>\" + _REGEX_STRING + \")\" # char regex _CHAR_CB =", "= r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic backtracking if a string is not", "r\"asm\", r\"auto\", r\"break\", r\"case\", r\"char\", r\"const\", r\"continue\", r\"default\", r\"define\", r\"double\", r\"do\", r\"elif\", r\"else\",", "r\"_Imaginary\", r\"_Noreturn\", r\"_Static_assert\", r\"_Thread_local\", r\"asm\", r\"auto\", r\"break\", r\"case\", r\"char\", r\"const\", r\"continue\", r\"default\", r\"define\",", "\"u\", \"U\"] _STRING_PREFIX = r\"(\" + \"|\".join(_STR_HEADERS) + \")?\" _REGEX_STRING = ( _STRING_PREFIX", "RM_INDENT = r\"(//[^\\n]*\\n)|(\\s*^\\s*)\" RM_MULTIPLE_SPACES = r\"[^\\n\\S]+\" # Language keywords _REGEX_KEYWORD = \"|\".join([r\"{}(?!\\w)\".format(kw) for", "+ \")\" # comments regex _COMMENT = r\"//[^\\n]*\" _STRING_CB = r\"(?P<ERROR_{}>\\Z)\" # catastrofic", "\"u8\", \"u\", \"U\"] _STRING_PREFIX = r\"(\" + \"|\".join(_STR_HEADERS) + \")?\" _REGEX_STRING = (", "r\"[0][xX][\\da-fA-F]+\" + _INT_SUFFIX _NUMBER_BIN = r\"[0][bB][0-1]+\" + _INT_SUFFIX _NUMBER_INT = r\"[^0\\D][\\d]*\" + _INT_SUFFIX", "_REGEX_STRING = ( _STRING_PREFIX + r'\"(\\\\\\n|\\\\\"|\\\\\\\\|[^\"]|.\\n])*(\"|' + _STRING_CB.format(\"STR\") + \")\" ) STRING =", "r\"ifdef\", r\"ifndef\", r\"if\", r\"include\", r\"inline\", r\"int\", r\"line\", r\"long\", r\"pragma\", r\"register\", 
# C language keywords (plus common preprocessor directive names)
LIST_KEYWORDS = [
    r"_Alignas", r"_Alignof", r"_Atomic", r"_Bool", r"_Complex",
    r"_Decimal128", r"_Decimal32", r"_Decimal64", r"_Generic", r"_Imaginary",
    r"_Noreturn", r"_Static_assert", r"_Thread_local", r"asm", r"auto",
    r"break", r"case", r"char", r"const", r"continue", r"default", r"define",
    r"double", r"do", r"elif", r"else", r"endif", r"enum", r"error",
    r"extern", r"float", r"fortran", r"for", r"goto", r"ifdef", r"ifndef",
    r"if", r"include", r"inline", r"int", r"line", r"long", r"pragma",
    r"register", r"restrict", r"return", r"short", r"signed", r"sizeof",
    r"static", r"struct", r"switch", r"typedef", r"undef", r"union",
    r"unsigned", r"void", r"volatile", r"while",
]

# cleaning regex
RM_INDENT = r"(//[^\n]*\n)|(\s*^\s*)"
RM_MULTIPLE_SPACES = r"[^\n\S]+"

# Language keywords
_REGEX_KEYWORD = "|".join([r"{}(?!\w)".format(kw) for kw in LIST_KEYWORDS])
KEYWORD = "(?P<KW>" + _REGEX_KEYWORD + ")"

# variable names regex
_REGEX_NAME = r"[\w]+"
NAME = r"(?P<NAME>" + _REGEX_NAME + ")"

# match every non-[\w] character (for example it matches +, -, *, /, ...)
# except spaces
_REGEX_OP = r"[^\s\w]"
OP = r"(?P<OP>" + _REGEX_OP + ")"

# numbers regex
_INT_SUFFIX = r"(lu|lU|ul|uL|Lu|LU|Ul|UL|l|u|L|U)?"
_NUMBER_OCT = r"[0][0-7]+" + _INT_SUFFIX
_NUMBER_HEX = r"[0][xX][\da-fA-F]+" + _INT_SUFFIX
_NUMBER_BIN = r"[0][bB][0-1]+" + _INT_SUFFIX
_NUMBER_INT = r"[^0\D][\d]*" + _INT_SUFFIX
_NUMBER_SCI = r"[\d]+[\.]?[\d]*[eE][+-]?[\d]+"
_NUMBER_FLO1 = r"[\d]+[\.]?[\d]*"
_NUMBER_FLO2 = r"[\.][\d]+"
_NUMBER_FLO = _NUMBER_FLO1 + "|" + _NUMBER_FLO2
_REGEX_NUMBER = "|".join(
    [_NUMBER_BIN, _NUMBER_OCT, _NUMBER_HEX, _NUMBER_INT, _NUMBER_SCI, _NUMBER_FLO]
)
NUMBER = r"(?P<NUMBER>" + _REGEX_NUMBER + ")"

# comments regex
_COMMENT = r"//[^\n]*"
# match \Z (end of input) to avoid catastrophic backtracking if a comment or
# string is not closed
_STRING_CB = r"(?P<ERROR_{}>\Z)"
_COMMENT_MULTILINE = r"/\*(.|[\r\n])*?(\*/|" + _STRING_CB.format("COMM_M") + ")"
_REGEX_COMMENT = _COMMENT + "|" + _COMMENT_MULTILINE
COMMENT = r"(?P<COMMENT>" + _REGEX_COMMENT + ")"

# string regex
_STR_HEADERS = ["L", "u8", "u", "U"]
_STRING_PREFIX = "(" + "|".join(_STR_HEADERS) + ")?"
_REGEX_STRING = (
    _STRING_PREFIX + r'"(\\\n|\\"|\\\\|[^"\n])*("|' + _STRING_CB.format("STR") + ")"
)
STRING = r"(?P<STRING>" + _REGEX_STRING + ")"

# char regex
_CHAR_CB = r"(?P<ERROR_{}>\Z)"  # catastrophic backtracking if a char is not closed
_CHAR_HEADERS = []
_CHAR_PREFIX = ""  # no char literal prefixes for C
_REGEX_CHAR = (
    _CHAR_PREFIX + r"'(\\.|[^'\n])*('|" + _CHAR_CB.format("CHAR") + ")"
)
CHAR = r"(?P<CHAR>" + _REGEX_CHAR + ")"

FULL_CREGEX = "|".join([COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME])


class CRegex:
    def __init__(self):
        self.regex_groups = [COMMENT, STRING, CHAR, KEYWORD, NUMBER, OP, NAME]

    def get_full_regex(self):
        return FULL_CREGEX

    def get_clean_indent_regex(self):
        return RM_INDENT
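# A small illustrative run of the combined pattern (an added sketch; the
# `_demo` snippet below is hypothetical, not from the original module).
# Each token comes back tagged with the named group that matched it.
if __name__ == "__main__":
    import re

    _demo = "int x = 0x1F; // a comment"
    for m in re.finditer(FULL_CREGEX, _demo):
        print(m.lastgroup, repr(m.group()))
    # prints: KW 'int', NAME 'x', OP '=', NUMBER '0x1F', OP ';',
    # COMMENT '// a comment'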
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""Explicit string formatting calls for arguments that aren't floats or strings.

:author: <NAME>
:created: 10/30/2020

The `string_conversion` module will format floats or strings. Some other
formatters can make things easier.
"""
from typing import Iterable, Tuple

from ..string_conversion import format_number


def svg_color_tuple(rgb_floats):
    """
    Turn an rgb tuple (0-255, 0-255, 0-255) into an svg color definition.

    :param rgb_floats: (0-255, 0-255, 0-255)
    :return: "rgb(128,128,128)"
    """
    r, g, b = (round(x) for x in rgb_floats)
    return f"rgb({r},{g},{b})"


def svg_ints(floats: Iterable[float]) -> str:
    """
    Space-delimited ints

    :param floats: any number of floats
    :return: each float rounded to an int, space delimited
    """
    return " ".join(str(round(x)) for x in floats)


def svg_float_tuples(tuples: Iterable[Tuple[float, float]]) -> str:
    """
    Space-delimited tuples

    :param tuples: [(a, b), (c, d)]
    :return: "a,b c,d"
    """
    tuples = [",".join(format_number(x) for x in y) for y in tuples]
    return " ".join(tuples)
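# Usage sketch (added for illustration, assuming the module is imported from
# its parent package so `..string_conversion` resolves; the expected output
# of svg_float_tuples also assumes format_number renders 2.5 as "2.5"):
#
#     >>> svg_color_tuple((127.6, 127.5, 128.4))
#     'rgb(128,128,128)'
#     >>> svg_ints([0.6, 1.2, 2.49])
#     '1 1 2'
#     >>> svg_float_tuples([(1, 2.5), (3, 4)])
#     '1,2.5 3,4'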
import re

# initial data input
infilename = "./day4.txt"

# required fields for checking
required = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}


def readfile():
    with open(infilename, "rt", encoding="utf-8") as file:
        inlist = [line.strip() for line in file]
    return inlist


def parse_input(inlist=readfile()):
    data_list = []  # list of dictionaries
    # artificially add an empty item to mark the end of the last document
    inlist.append("")
    dic = {}
    for item in inlist:
        if item:  # not an empty line => belongs to the same document
            for pair in item.split():
                keyvalue = pair.split(":")
                dic[keyvalue[0]] = keyvalue[1]
        else:  # starts a new document
            data_list.append(dic)
            dic = {}
    return data_list
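# A quick illustrative check of the parser (added sketch; `_demo_batch` is a
# hypothetical name, not from the original script). A blank line closes one
# record, and the artificially appended "" closes the last one:
_demo_batch = ["eyr:2020 pid:000000001", "hgt:180cm", "", "byr:1999"]
assert parse_input(_demo_batch) == [
    {"eyr": "2020", "pid": "000000001", "hgt": "180cm"},
    {"byr": "1999"},
]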
# 2 valid passports for part 1:
testinput = [
    "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm",
    "",
    "iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929",
    "",
    "hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm",
    "",
    "hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in",
]

# --- Part One ---
"""
The automatic passport scanners are slow because they're having trouble
detecting which passports have all required fields. The expected fields are
as follows:

byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)

Passport data is validated in batch files (your puzzle input). Each passport
is represented as a sequence of key:value pairs separated by spaces or
newlines. Passports are separated by blank lines.

Here is an example batch file containing four passports:

ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm

iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929

hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm

hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in

The first passport is valid - all eight fields are present. The second
passport is invalid - it is missing hgt (the Height field). The third
passport is interesting; the only missing field is cid, so it looks like
data from North Pole Credentials, not a passport at all! Surely, nobody
would mind if you made the system temporarily ignore missing cid fields.
Treat this "passport" as valid. The fourth passport is missing two fields,
cid and byr. Missing cid is fine, but missing any other field is not, so
this passport is invalid.

Count the number of valid passports - those that have all required fields.
Treat cid as optional. In your batch file, how many passports are valid?
"""


def extra_check(doc: dict, extra):
    # returns True if all checks pass

    def n_digits_check(txt: str, start: int, end: int, n=4):
        # check for an n-digit number within the given range
        return len(txt) == n and int(txt) in range(start, end + 1)

    def hgt_check(txt):
        # hgt (Height) - a number followed by either cm or in:
        # If cm, the number must be at least 150 and at most 193.
        # If in, the number must be at least 59 and at most 76.
        pat = re.compile(r"(\d+)(cm|in)")  # compile regex
        tuples = re.search(pat, txt)
        if not tuples:  # if the correct pattern is not found
            return False
        num, unit = int(tuples.group(1)), tuples.group(2)
        if unit == "cm":
            ok = num in range(150, 193 + 1)
        elif unit == "in":
            ok = num in range(59, 76 + 1)
        else:
            ok = False
        return ok

    def hcl_check(txt):
        pat = re.compile(r"#[a-f0-9]{6}")  # compile regex
        return re.search(pat, txt) is not None

    def ecl_check(txt):
        return txt in ("amb blu brn gry grn hzl oth").split()

    def pid_check(txt):
        return txt.isdigit() and len(txt) == 9

    if not extra:
        return True
    # checking extra rules
    return (
        n_digits_check(doc["byr"], 1920, 2002)
        and n_digits_check(doc["iyr"], 2010, 2020)
        and n_digits_check(doc["eyr"], 2020, 2030)
        and hgt_check(doc["hgt"])
        and hcl_check(doc["hcl"])
        and ecl_check(doc["ecl"])
        and pid_check(doc["pid"])
    )


def analyse(doclist, required, extra=False) -> int:
    # returns the number of valid documents according to the fields listed
    # in the 'required' set
    valid = 0
    nreq = len(required)
    for doc in doclist:
        fields_found = 0
        for r in required:
            # check if all required fields are found in the document
            if r in doc:
                fields_found += 1
        if fields_found == nreq and extra_check(doc, extra):
            valid += 1
    return valid


def part1(inlist=testinput) -> int:
    # returns the number of valid documents
    return analyse(parse_input(inlist), required)
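# Sanity check against the comment above testinput (added sketch, not part
# of the original script): exactly 2 of the 4 example passports are valid.
# (parse_input appends a trailing "" to its argument; that is harmless here.)
assert part1(testinput) == 2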
# --- Part Two ---
"""
You can continue to ignore the cid field, but each other field has strict
rules about what values are valid for automatic validation:

byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
    If cm, the number must be at least 150 and at most 193.
    If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.

Your job is to count the passports where all required fields are both
present and valid according to the above rules. Here are some example
values:

byr valid:   2002
byr invalid: 2003

hgt valid:   60in
hgt valid:   190cm
hgt invalid: 190in
hgt invalid: 190

hcl valid:   #123abc
hcl invalid: #123abz
hcl invalid: 123abc

ecl valid:   brn
ecl invalid: wat

pid valid:   000000001
pid invalid: 0123456789

Here are some invalid passports:

eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926

iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946

hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277

hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007

Here are some valid passports:

pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 hcl:#623a2f

eyr:2029 ecl:blu cid:129 byr:1989 iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm

hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022

iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719

Count the number of valid passports - those that have all required fields
and valid values. Continue to treat cid as optional.
"""

# 2 valid passports for part 2:
testinput2 = [
    "eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926",
    "",
    "iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946",
    "",
    "pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 hcl:#623a2f",
    "",
    "eyr:2029 ecl:blu cid:129 byr:1989 iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm",
    "",
    "eyr:2029 ecl:blu cid:129 byr:1989 iyr:2014 pid:89605653z hcl:#a97842 hgt:165cm",
]


def part2(inlist=testinput2) -> int:
    return analyse(parse_input(inlist), required, extra=True)
cid (Country ID) -", "document data_list.append(dic) dic = {} return data_list # 2 valid passports for part", "hcl:#623a2f\", \"\", \"eyr:2029 ecl:blu cid:129 byr:1989 \\ iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\", \"\", \"eyr:2029", "byr:1989 \\ iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\", \"\", \"eyr:2029 ecl:blu cid:129 byr:1989 \\ iyr:2014", "like data from North Pole Credentials, not a passport at all! Surely, nobody", "iyr (Issue Year) - four digits; at least 2010 and at most 2020.", "eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926 iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn", "valid passports: pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 hcl:#623a2f eyr:2029 ecl:blu cid:129 byr:1989", "cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 Count the", "of dictionaries # artificially add empty item in order to mark the end", "improved system would report 2 valid passports. Count the number of valid passports", "number followed by either cm or in: If cm, the number must be", "all required fields are both present and valid according to the above rules.", "valid: #123abc hcl invalid: #123abz hcl invalid: 123abc ecl valid: brn ecl invalid:", "hcl:#cfa07d byr:1929 hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011", "eyr:2020 hcl:#fffffd \\ byr:1937 iyr:2017 cid:147 hgt:183cm\", \"\", \"iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884", "the system temporarily ignore missing cid fields. Treat this \"passport\" as valid. The", "to ignore the cid field, but each other field has strict rules about", "least 59 and at most 76. pat = re.compile(r\"(\\d+)(cm|in)\") # compile regex tuples", "most 76. pat = re.compile(r\"(\\d+)(cm|in)\") # compile regex tuples = re.search(pat, txt) if", "if no parameter for part X - test input is used print(\"Part1. Number", "hgt invalid: 190 hcl valid: #123abc hcl invalid: #123abz hcl invalid: 123abc ecl", "iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\", \"\", \"eyr:2029 ecl:blu cid:129 byr:1989 \\ iyr:2014 pid:89605653z hcl:#a97842", "dict, extra): # returns True if all checks pass def n_digits_check(txt: str, start:", "both present and valid according to the above rules. Here are some example", "if fields_found == nreq and extra_check(doc, extra): valid += 1 return valid def", "all checks pass def n_digits_check(txt: str, start: int, end: int, n=4): # check", "== \"in\": ok = num in range(59, 76 + 1) else: ok =", "r in required: # check if all required fields are found from the", "fields. Treat this \"passport\" as valid. The fourth passport is missing two fields,", "- four digits; at least 1920 and at most 2002. iyr (Issue Year)", "hgt valid: 190cm hgt invalid: 190in hgt invalid: 190 hcl valid: #123abc hcl", "must be at least 150 and at most 193. # If in, the", "hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021", "\"pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 \\ hcl:#623a2f\", \"\", \"eyr:2029 ecl:blu cid:129 byr:1989", "def part1(inlist=testinput) -> int: # returns number of valid documents return analyse(parse_input(inlist), required)", "automatic validation: byr (Birth Year) - four digits; at least 1920 and at", "hgt:165cm hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu", "passport at all! 
Surely, nobody would mind if you made the system temporarily", "hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in", "hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007 __Here are some valid passports: pid:087499704", "--- Part Two --- \"\"\" You can continue to ignore the cid field,", "example values: byr valid: 2002 byr invalid: 2003 hgt valid: 60in hgt valid:", "(Issue Year) eyr (Expiration Year) hgt (Height) hcl (Hair Color) ecl (Eye Color)", "--- Part One --- \"\"\" The automatic passport scanners are slow because they're", "return analyse(parse_input(inlist), required) # --- Part Two --- \"\"\" You can continue to", "scanners are slow because they're having trouble detecting which passports have all required", "to the above rules, your improved system would report 2 valid passports. Count", "return txt.isdigit() and len(txt) == 9 if not extra: return True # checking", "required, extra=True) # --- MAIN --- if __name__ == \"__main__\": # if no", "if correct pattern not found return False num, unit = int(tuples.group(1)), tuples.group(2) if", "hgt (Height) - a number followed by either cm or in: If cm,", "followed by either cm or in: # If cm, the number must be", "containing four passports: ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm iyr:2013 ecl:amb", "hcl_check(txt): pat = re.compile(r\"#[a-f0-9]{6}\") # compile regex return re.search(pat, txt) != None def", "what values are valid for automatic validation: byr (Birth Year) - four digits;", "trouble detecting which passports have all required fields. The expected fields are as", "ecl:blu cid:129 byr:1989 iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238", "artificially add empty item in order to mark the end of the last", "invalid passports: eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926 iyr:2019 hcl:#602927 eyr:1967", "least 150 and at most 193. # If in, the number must be", "all required fields are found from the document if r in doc: fields_found", "valid: brn ecl invalid: wat pid valid: 000000001 pid invalid: 0123456789 __Here are", "field). The third passport is interesting; the only missing field is cid, so", "[line.strip() for line in file] return inlist def parse_input(inlist=readfile()): data_list = [] #", "ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007 __Here are some valid passports: pid:087499704 hgt:74in", "byr:1931 \\ hgt:179cm\", \"\", \"hcl:#cfa07d eyr:2025 pid:166559648 \\ iyr:2011 ecl:brn hgt:59in\", ] #", "the only missing field is cid, so it looks like data from North", "any other field is not, so this passport is invalid. According to the", "at least 59 and at most 76. hcl (Hair Color) - a #", "\\ eyr:2024 \\ ecl:brn pid:760753108 byr:1931 \\ hgt:179cm\", \"\", \"hcl:#cfa07d eyr:2025 pid:166559648 \\", "return ok def hcl_check(txt): pat = re.compile(r\"#[a-f0-9]{6}\") # compile regex return re.search(pat, txt)", "extra_check(doc, extra): valid += 1 return valid def part1(inlist=testinput) -> int: # returns", "eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 Count the number of valid", "batch files (your puzzle input). Each passport is represented as a sequence of", "valid for automatic validation: byr (Birth Year) - four digits; at least 1920", "2010 and at most 2020. 
eyr (Expiration Year) - four digits; at least", "- a number followed by either cm or in: If cm, the number", "pid:3556412378 byr:2007 __Here are some valid passports: pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980", "ecl:blu byr:1944 eyr:2021 pid:093154719 Count the number of valid passports - those that", "\\ iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\", \"\", \"eyr:2029 ecl:blu cid:129 byr:1989 \\ iyr:2014 pid:89605653z", "unit == \"in\": ok = num in range(59, 76 + 1) else: ok", "= {} for item in inlist: if item: # not an empty line", "cid:147 hgt:183cm\", \"\", \"iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 \\ hcl:#cfa07d byr:1929\", \"\", \"hcl:#ae17e1", "at most 76. pat = re.compile(r\"(\\d+)(cm|in)\") # compile regex tuples = re.search(pat, txt)", "item in inlist: if item: # not an empty line => belongs to", "n_digits_check(doc[\"iyr\"], 2010, 2020) and n_digits_check(doc[\"eyr\"], 2020, 2030) and hgt_check(doc[\"hgt\"]) and hcl_check(doc[\"hcl\"]) and ecl_check(doc[\"ecl\"])", "for checking required = {\"byr\", \"iyr\", \"eyr\", \"hgt\", \"hcl\", \"ecl\", \"pid\"} def readfile():", "(Height) - a number followed by either cm or in: If cm, the", "60in hgt valid: 190cm hgt invalid: 190in hgt invalid: 190 hcl valid: #123abc", "missing or not. Your job is to count the passports where all required", "valid passports. Count the number of valid passports - those that have all", "dictionaries # artificially add empty item in order to mark the end of", "(Birth Year) - four digits; at least 1920 and at most 2002. iyr", "data_list.append(dic) dic = {} return data_list # 2 valid passports for part 1:", "extra): # returns True if all checks pass def n_digits_check(txt: str, start: int,", "1920 and at most 2002. iyr (Issue Year) - four digits; at least", "by exactly six characters 0-9 or a-f. ecl (Eye Color) - exactly one", "invalid: wat pid valid: 000000001 pid invalid: 0123456789 __Here are some invalid passports:", "190 hcl valid: #123abc hcl invalid: #123abz hcl invalid: 123abc ecl valid: brn", "fields and valid values. Continue to treat cid as optional. \"\"\" # 2", "of key:value pairs separated by spaces or newlines. Passports are separated by blank", "Passport data is validated in batch files (your puzzle input). Each passport is", "either cm or in: # If cm, the number must be at least", "(Hair Color) ecl (Eye Color) pid (Passport ID) cid (Country ID) Passport data", "(Issue Year) - four digits; at least 2010 and at most 2020. eyr", "'required' dictionary valid = 0 nreq = len(required) for doc in doclist: fields_found", "fields, cid and byr. Missing cid is fine, but missing any other field", "According to the above rules, your improved system would report 2 valid passports.", "as file: inlist = [line.strip() for line in file] return inlist def parse_input(inlist=readfile()):", "hcl:#a97842 hgt:165cm hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a", "leading zeroes. cid (Country ID) - ignored, missing or not. Your job is", "byr:1926\", \"\", \"iyr:2019 \\ hcl:#602927 eyr:1967 hgt:170cm \\ ecl:grn pid:012533040 byr:1946\", \"\", \"pid:087499704", "not extra: return True # checking extra rules return ( n_digits_check(doc[\"byr\"], 1920, 2002)", "made the system temporarily ignore missing cid fields. 
Treat this \"passport\" as valid.", "1) elif unit == \"in\": ok = num in range(59, 76 + 1)", "keyvalue[1] else: # starts new document data_list.append(dic) dic = {} return data_list #", "190cm hgt invalid: 190in hgt invalid: 190 hcl valid: #123abc hcl invalid: #123abz", "are valid? \"\"\" def extra_check(doc: dict, extra): # returns True if all checks", "so this passport is invalid. According to the above rules, your improved system", "are as follows: byr (Birth Year) iyr (Issue Year) eyr (Expiration Year) hgt", "\"\", \"eyr:2029 ecl:blu cid:129 byr:1989 \\ iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\", \"\", \"eyr:2029 ecl:blu", "hgt_check(doc[\"hgt\"]) and hcl_check(doc[\"hcl\"]) and ecl_check(doc[\"ecl\"]) and pid_check(doc[\"pid\"]) ) def analyse(doclist, required, extra=False) ->", "in, the number must be at least 59 and at most 76. pat", "193. If in, the number must be at least 59 and at most", "- a nine-digit number, including leading zeroes. cid (Country ID) - ignored, missing", "fields are both present and valid according to the above rules. Here are", "byr:1992 cid:277 hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007 __Here are some valid", "\"passport\" as valid. The fourth passport is missing two fields, cid and byr.", "int(tuples.group(1)), tuples.group(2) if unit == \"cm\": ok = num in range(150, 193 +", "+= 1 return valid def part1(inlist=testinput) -> int: # returns number of valid", "# not an empty line => belongs to the same document for item", "Part Two --- \"\"\" You can continue to ignore the cid field, but", "190in hgt invalid: 190 hcl valid: #123abc hcl invalid: #123abz hcl invalid: 123abc", "hgt invalid: 190in hgt invalid: 190 hcl valid: #123abc hcl invalid: #123abz hcl", "digits; at least 2020 and at most 2030. hgt (Height) - a number", "checking required = {\"byr\", \"iyr\", \"eyr\", \"hgt\", \"hcl\", \"ecl\", \"pid\"} def readfile(): with", "cid, so it looks like data from North Pole Credentials, not a passport", "the cid field, but each other field has strict rules about what values", "of: amb blu brn gry grn hzl oth. pid (Passport ID) - a", "part 2: testinput2 = [ \"eyr:1972 cid:100 \\ hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018", "# hgt (Height) - a number followed by either cm or in: #", "byr. Missing cid is fine, but missing any other field is not, so", "the passports where all required fields are both present and valid according to", "hcl:#602927 eyr:1967 hgt:170cm \\ ecl:grn pid:012533040 byr:1946\", \"\", \"pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030", "byr:1926 iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946 hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410", "at least 150 and at most 193. # If in, the number must", "eyr:2021 pid:093154719 Count the number of valid passports - those that have all", "return False num, unit = int(tuples.group(1)), tuples.group(2) if unit == \"cm\": ok =", "blank lines. Here is an example batch file containing four passports: ecl:gry pid:860033327", "least 59 and at most 76. hcl (Hair Color) - a # followed", "pattern not found return False num, unit = int(tuples.group(1)), tuples.group(2) if unit ==", "byr:1931 hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in The first passport is valid", "strict rules about what values are valid for automatic validation: byr (Birth Year)", "invalid: 2003 hgt valid: 60in hgt valid: 190cm hgt invalid: 190in hgt invalid:", "valid? 
\"\"\" def extra_check(doc: dict, extra): # returns True if all checks pass", "passports where all required fields are both present and valid according to the", "pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 Count the number", "cid is fine, but missing any other field is not, so this passport", "hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 Count the number of valid passports - those", "have all required fields and valid values. Continue to treat cid as optional.", "150 and at most 193. # If in, the number must be at", "a-f. ecl (Eye Color) - exactly one of: amb blu brn gry grn", "and byr. Missing cid is fine, but missing any other field is not,", "passport is invalid - it is missing hgt (the Height field). The third", "ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\", \"\", \"iyr:2019 \\ hcl:#602927 eyr:1967 hgt:170cm \\ ecl:grn", "eyr:2024 \\ ecl:brn pid:760753108 byr:1931 \\ hgt:179cm\", \"\", \"hcl:#cfa07d eyr:2025 pid:166559648 \\ iyr:2011", "\"./day4.txt\" # required fields for checking required = {\"byr\", \"iyr\", \"eyr\", \"hgt\", \"hcl\",", "you made the system temporarily ignore missing cid fields. Treat this \"passport\" as", "\"hgt\", \"hcl\", \"ecl\", \"pid\"} def readfile(): with open(infilename, \"rt\", encoding=\"utf-8\") as file: inlist", "byr:1929\", \"\", \"hcl:#ae17e1 iyr:2013 \\ eyr:2024 \\ ecl:brn pid:760753108 byr:1931 \\ hgt:179cm\", \"\",", "eyr:2020 byr:1992 cid:277 hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007 __Here are some", "byr:1944 eyr:2021 pid:093154719 Count the number of valid passports - those that have", "- four digits; at least 2020 and at most 2030. hgt (Height) -", "rules. Here are some example values: byr valid: 2002 byr invalid: 2003 hgt", "2002. iyr (Issue Year) - four digits; at least 2010 and at most", "- those that have all required fields and valid values. Continue to treat", "open(infilename, \"rt\", encoding=\"utf-8\") as file: inlist = [line.strip() for line in file] return", "listed in 'required' dictionary valid = 0 nreq = len(required) for doc in", "pid_check(txt): return txt.isdigit() and len(txt) == 9 if not extra: return True #", "int(txt) in range(start, end + 1) def hgt_check(txt): # hgt (Height) - a", "oth. pid (Passport ID) - a nine-digit number, including leading zeroes. cid (Country", "if you made the system temporarily ignore missing cid fields. Treat this \"passport\"", "r in doc: fields_found += 1 if fields_found == nreq and extra_check(doc, extra):", "and at most 2002. iyr (Issue Year) - four digits; at least 2010", "other field is not, so this passport is invalid. According to the above", "hcl (Hair Color) - a # followed by exactly six characters 0-9 or", "documents return analyse(parse_input(inlist), required) # --- Part Two --- \"\"\" You can continue", "2030. hgt (Height) - a number followed by either cm or in: If", "hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in The first passport is valid -", "cid:147 hgt:183cm iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929 hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn", "hgt:59in The first passport is valid - all eight fields are present. The", "is used print(\"Part1. Number of valid passports:\", part1(readfile())) print(\"Part2. Number of valid passports:\",", "spaces or newlines. Passports are separated by blank lines. Here is an example", "data_list # 2 valid passports for part 1: testinput = [ \"ecl:gry pid:860033327", "separated by blank lines. 
Here is an example batch file containing four passports:", "is an example batch file containing four passports: ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937", "and extra_check(doc, extra): valid += 1 return valid def part1(inlist=testinput) -> int: #", "eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946 hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277", "blu brn gry grn hzl oth. pid (Passport ID) - a nine-digit number,", "required = {\"byr\", \"iyr\", \"eyr\", \"hgt\", \"hcl\", \"ecl\", \"pid\"} def readfile(): with open(infilename,", "values. Continue to treat cid as optional. \"\"\" # 2 valid passports for", "with open(infilename, \"rt\", encoding=\"utf-8\") as file: inlist = [line.strip() for line in file]", "the same document for item in item.split(): keyvalue = item.split(\":\") dic[keyvalue[0]] = keyvalue[1]", "this passport is invalid. According to the above rules, your improved system would", "passports for part 2: testinput2 = [ \"eyr:1972 cid:100 \\ hcl:#18171d ecl:amb hgt:170", "hcl:#a97842 hgt:165cm\", \"\", \"eyr:2029 ecl:blu cid:129 byr:1989 \\ iyr:2014 pid:89605653z hcl:#a97842 hgt:165cm\", ]", "check for n-digits ranges return len(txt) == n and int(txt) in range(start, end", "\"\", \"hcl:#cfa07d eyr:2025 pid:166559648 \\ iyr:2011 ecl:brn hgt:59in\", ] # --- Part One", "cid:129 byr:1989 \\ iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\", \"\", \"eyr:2029 ecl:blu cid:129 byr:1989 \\", "a number followed by either cm or in: If cm, the number must", "hgt:170 pid:186cm iyr:2018 byr:1926\", \"\", \"iyr:2019 \\ hcl:#602927 eyr:1967 hgt:170cm \\ ecl:grn pid:012533040", "hzl oth\").split() def pid_check(txt): return txt.isdigit() and len(txt) == 9 if not extra:", "according to fields listed in 'required' dictionary valid = 0 nreq = len(required)", "--- MAIN --- if __name__ == \"__main__\": # if no parameter for part", "required fields are found from the document if r in doc: fields_found +=", "[] # list of dictionaries # artificially add empty item in order to", "hgt (Height) - a number followed by either cm or in: # If", "cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929 hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm hcl:#cfa07d", "all! Surely, nobody would mind if you made the system temporarily ignore missing", "either cm or in: If cm, the number must be at least 150", "\"iyr:2019 \\ hcl:#602927 eyr:1967 hgt:170cm \\ ecl:grn pid:012533040 byr:1946\", \"\", \"pid:087499704 hgt:74in ecl:grn", "pid:012533040 byr:1946\", \"\", \"pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 \\ hcl:#623a2f\", \"\", \"eyr:2029", "are separated by blank lines. Here is an example batch file containing four", "- all eight fields are present. The second passport is invalid - it", "ecl:grn pid:012533040 byr:1946 hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277 hgt:59cm ecl:zzz", "__name__ == \"__main__\": # if no parameter for part X - test input", "item.split(): keyvalue = item.split(\":\") dic[keyvalue[0]] = keyvalue[1] else: # starts new document data_list.append(dic)", "for part 1: testinput = [ \"ecl:gry pid:860033327 eyr:2020 hcl:#fffffd \\ byr:1937 iyr:2017", "txt.isdigit() and len(txt) == 9 if not extra: return True # checking extra", "but missing any other field is not, so this passport is invalid. According", "ecl:amb cid:350 eyr:2023 pid:028048884 \\ hcl:#cfa07d byr:1929\", \"\", \"hcl:#ae17e1 iyr:2013 \\ eyr:2024 \\", "] # --- Part One --- \"\"\" The automatic passport scanners are slow", "76. 
pat = re.compile(r\"(\\d+)(cm|in)\") # compile regex tuples = re.search(pat, txt) if not", "number of valid documents according to fields listed in 'required' dictionary valid =", "files (your puzzle input). Each passport is represented as a sequence of key:value", "Here is an example batch file containing four passports: ecl:gry pid:860033327 eyr:2020 hcl:#fffffd", "hgt_check(txt): # hgt (Height) - a number followed by either cm or in:", "elif unit == \"in\": ok = num in range(59, 76 + 1) else:", "ok = False return ok def hcl_check(txt): pat = re.compile(r\"#[a-f0-9]{6}\") # compile regex", "iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 Count the number of valid passports", "all required fields. The expected fields are as follows: byr (Birth Year) iyr", "re.compile(r\"#[a-f0-9]{6}\") # compile regex return re.search(pat, txt) != None def ecl_check(txt): return txt", "it is missing hgt (the Height field). The third passport is interesting; the", "{\"byr\", \"iyr\", \"eyr\", \"hgt\", \"hcl\", \"ecl\", \"pid\"} def readfile(): with open(infilename, \"rt\", encoding=\"utf-8\")", "== nreq and extra_check(doc, extra): valid += 1 return valid def part1(inlist=testinput) ->", "brn gry grn hzl oth\").split() def pid_check(txt): return txt.isdigit() and len(txt) == 9", "dic = {} for item in inlist: if item: # not an empty", "+ 1) elif unit == \"in\": ok = num in range(59, 76 +", "Two --- \"\"\" You can continue to ignore the cid field, but each", "ID) cid (Country ID) Passport data is validated in batch files (your puzzle", "eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in The first passport is valid - all eight", "required) # --- Part Two --- \"\"\" You can continue to ignore the", "def ecl_check(txt): return txt in (\"amb blu brn gry grn hzl oth\").split() def", "empty item in order to mark the end of the last document inlist.append(\"\")", "str, start: int, end: int, n=4): # check for n-digits ranges return len(txt)", "missing cid fields. Treat this \"passport\" as valid. The fourth passport is missing", "In your batch file, how many passports are valid? \"\"\" def extra_check(doc: dict,", "The third passport is interesting; the only missing field is cid, so it", "and valid according to the above rules. Here are some example values: byr", "\"iyr\", \"eyr\", \"hgt\", \"hcl\", \"ecl\", \"pid\"} def readfile(): with open(infilename, \"rt\", encoding=\"utf-8\") as", "2030) and hgt_check(doc[\"hgt\"]) and hcl_check(doc[\"hcl\"]) and ecl_check(doc[\"ecl\"]) and pid_check(doc[\"pid\"]) ) def analyse(doclist, required,", "cid as optional. \"\"\" # 2 valid passports for part 2: testinput2 =", "-> int: return analyse(parse_input(inlist), required, extra=True) # --- MAIN --- if __name__ ==", "and at most 193. # If in, the number must be at least", "at least 59 and at most 76. 
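# --- Editor's sketch (not part of the original solution) ---------------------
# `analyse` only calls `extra_check` once every required field is present, so
# `extra_check` may index `doc` directly. A quick illustration built from one
# of the valid passports documented in the Part Two text below; `_example` is
# a hypothetical name added here for demonstration only.
_example = {
    "pid": "087499704", "hgt": "74in", "ecl": "grn",
    "iyr": "2012", "eyr": "2030", "byr": "1980", "hcl": "#623a2f",
}
assert extra_check(_example, extra=True)                          # all rules pass
assert not extra_check({**_example, "hgt": "190in"}, extra=True)  # 190in is out of range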
# --- Part Two ---
"""
You can continue to ignore the cid field, but each other field has strict
rules about what values are valid for automatic validation:

byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
    If cm, the number must be at least 150 and at most 193.
    If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.

Your job is to count the passports where all required fields are both present
and valid according to the above rules. Here are some example values:

byr valid:   2002
byr invalid: 2003

hgt valid:   60in
hgt valid:   190cm
hgt invalid: 190in
hgt invalid: 190

hcl valid:   #123abc
hcl invalid: #123abz
hcl invalid: 123abc

ecl valid:   brn
ecl invalid: wat

pid valid:   000000001
pid invalid: 0123456789

__Here are some invalid passports:

eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926

iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946

hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277

hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007

__Here are some valid passports:

pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f

eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm

hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022

iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719

Count the number of valid passports - those that have all required fields and
valid values. Continue to treat cid as optional.
"""

# 2 valid passports for part 2:
testinput2 = [
    "eyr:1972 cid:100 \
     hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926",
    "",
    "iyr:2019 \
     hcl:#602927 eyr:1967 hgt:170cm \
     ecl:grn pid:012533040 byr:1946",
    "",
    "pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 \
     hcl:#623a2f",
    "",
    "eyr:2029 ecl:blu cid:129 byr:1989 \
     iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm",
    "",
    "eyr:2029 ecl:blu cid:129 byr:1989 \
     iyr:2014 pid:89605653z hcl:#a97842 hgt:165cm",
]


def part2(inlist=testinput2) -> int:
    return analyse(parse_input(inlist), required, extra=True)


# --- MAIN ---
if __name__ == "__main__":
    # if no parameter is given for part X, the test input is used
    print("Part1. Number of valid passports:", part1(readfile()))
    print("Part2. Number of valid passports:", part2(readfile()))
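# --- Editor's sketch (not part of the original solution) ---------------------
# Both sample batches are documented above as containing exactly two valid
# passports under their respective rules, so the whole parse-and-validate
# pipeline can be sanity-checked against the module's own test data.
# `_selftest` is a hypothetical helper added for illustration.
def _selftest():
    assert part1(testinput) == 2   # Part One sample: 2 of the 4 passports are valid
    assert part2(testinput2) == 2  # Part Two sample: 2 valid passports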
[ "FiniteAutomaton({(A, a): B}, A, {B}) C = State('C') D = State('D') fa_2 =", "= fa1.copy() assert fa1 is not fa2 def test_evaluate(): a = Symbol('a') A,", "fa.complete() def test_negate(): a = Symbol('a') b = Symbol('b') A = State('A') B", "{A}, (B, a): {A}, (B, b): {B}, } fa1 = FiniteAutomaton(transitions, A, {A})", "{ (A, a): {B}, (B, a): {A}, } fa = FiniteAutomaton(transitions, A, {A})", "{H}, (H, b): {D}, } fa = FiniteAutomaton(transitions, A, [A, D, G]) fa", "= { (A, a): {G}, (A, b): {B}, (B, a): {F}, (B, b):", "= FiniteAutomaton({(A, a): B}, A, {B}) C, D = State('C'), State('D') fa_2 =", "(D, b): {H}, (E, a): {E}, (E, b): {A}, (F, a): {B}, (F,", "not fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba')) def test_concatenate(): a = Symbol('a') A = State('A')", "} fa = FiniteAutomaton(transitions, A, {A}) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa'))", "test_complete(): a = Symbol('a') b = Symbol('b') A = State('A') B = State('B')", "a): {A}, } fa = FiniteAutomaton(transitions, A, {A}) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence(''))", "6 def test_minimize(): a = Symbol('a') b = Symbol('b') A = State('A') B", "b): {D}, } fa = FiniteAutomaton(transitions, A, [A, D, G]) fa.remove_unreachable_states() assert len(fa.states)", "assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def test_union(): a, b = Symbol('a'), Symbol('b') A, B", "Symbol('b') A, B = State('A'), State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B})", "not fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba')) def test_concatenate(): a = Symbol('a')", "= State('A') B = State('B') transitions = { (A, a): {A}, (A, b):", "FiniteAutomaton.union(fa_1, fa_2) assert not fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert", "a = Symbol('a') A, B = State('A'), State('B') transitions = { (A, a):", "Symbol('b') A, B = State('A'), State('B') transitions = { (A, a): {B}, (A,", "a): {A}, (A, b): {B}, } fa = FiniteAutomaton(transitions, A, {B}) assert not", "State('B') transitions = { (A, a): {B}, (B, a): {A}, } fa =", "not fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa = fa.negate() assert n_fa.evaluate(Sentence('')) assert", "b = Symbol('a'), Symbol('b') A, B = State('A'), State('B') fa_1 = FiniteAutomaton({(A, a):", "FiniteAutomaton({(C, a): D}, C, {D}) fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2) assert not fa_concat.evaluate(Sentence('')) assert", "(H, b): {D}, } fa = FiniteAutomaton(transitions, A, [A, D, G]) fa.remove_unreachable_states() assert", "State('H') transitions = { (A, a): {G}, (A, b): {B}, (B, a): {F},", "{B}) C, D = State('C'), State('D') fa_2 = FiniteAutomaton({(C, b): D}, C, {D})", "= State('H') transitions = { (A, a): {G}, (A, b): {B}, (B, a):", "= Symbol('b') A = State('A') B = State('B') C = State('C') D =", "= FiniteAutomaton.union(fa_1, fa_2) assert not fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b'))", "assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def test_union(): a, b = Symbol('a'),", "n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a = Symbol('a') b = Symbol('b') A = 
State('A') B", "= Symbol('a') A, B = State('A'), State('B') transitions = { (A, a): {B},", "{G}, (D, a): {A}, (D, b): {H}, (E, a): {E}, (E, b): {A},", "= State('E') F = State('F') G = State('G') H = State('H') transitions =", "Sentence, State, Symbol def test_copy(): a, b = Symbol('a'), Symbol('b') A, B =", "(E, b): {A}, (F, a): {B}, (F, b): {C}, (G, a): {G}, (G,", "fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab')) assert not", "b): {F}, (H, a): {H}, (H, b): {D}, } fa = FiniteAutomaton(transitions, A,", "= FiniteAutomaton(transitions, A, {A}) fa2 = fa1.copy() assert fa1 is not fa2 def", "fa.evaluate(Sentence('ab')) n_fa = fa.negate() assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable():", "def test_evaluate(): a = Symbol('a') A, B = State('A'), State('B') transitions = {", "{B}) fa.complete() def test_negate(): a = Symbol('a') b = Symbol('b') A = State('A')", "C = State('C') D = State('D') E = State('E') F = State('F') G", "a = Symbol('a') b = Symbol('b') A = State('A') B = State('B') transitions", "A = State('A') B = State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B})", "(D, a): {A}, (D, b): {H}, (E, a): {E}, (E, b): {A}, (F,", "not fa_union.evaluate(Sentence('ba')) def test_concatenate(): a = Symbol('a') A = State('A') B = State('B')", "<filename>tests/test_finite_automaton.py from kleeneup import FiniteAutomaton, Sentence, State, Symbol def test_copy(): a, b =", "fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba')) def test_concatenate(): a", "(F, b): {C}, (G, a): {G}, (G, b): {F}, (H, a): {H}, (H,", "State('B') transitions = { (A, a): {B}, (A, b): {A}, (B, a): {A},", "} fa1 = FiniteAutomaton(transitions, A, {A}) fa2 = fa1.copy() assert fa1 is not", "b = Symbol('b') A = State('A') B = State('B') transitions = { (A,", "= State('A') B = State('B') C = State('C') D = State('D') E =", "(B, b): {E}, (C, a): {C}, (C, b): {G}, (D, a): {A}, (D,", "FiniteAutomaton.concatenate(fa_1, fa_2) assert not fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa'))", "{E}, (C, a): {C}, (C, b): {G}, (D, a): {A}, (D, b): {H},", "State('D') fa_2 = FiniteAutomaton({(C, a): D}, C, {D}) fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2) assert", "{B}) assert not fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa = fa.negate() assert", "} fa = FiniteAutomaton(transitions, A, [A, D, G]) fa = fa.minimize() assert len(fa.states)", "assert fa.evaluate(Sentence('ab')) n_fa = fa.negate() assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab')) def", "{A}) fa2 = fa1.copy() assert fa1 is not fa2 def test_evaluate(): a =", "b): {B}, } fa = FiniteAutomaton(transitions, A, {B}) assert not fa.evaluate(Sentence('')) assert not", "[A, D, G]) fa.remove_unreachable_states() assert len(fa.states) == 6 def test_minimize(): a = Symbol('a')", "Symbol('a'), Symbol('b') A, B = State('A'), State('B') transitions = { (A, a): {B},", "State('C') D = State('D') fa_2 = FiniteAutomaton({(C, a): D}, C, {D}) fa_concat =", "{D}) fa_union = FiniteAutomaton.union(fa_1, fa_2) 
assert not fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a'))", "E = State('E') F = State('F') G = State('G') H = State('H') transitions", "D}, C, {D}) fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2) assert not fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a'))", "= Symbol('a') b = Symbol('b') A = State('A') B = State('B') C =", "= Symbol('a'), Symbol('b') A, B = State('A'), State('B') fa_1 = FiniteAutomaton({(A, a): B},", "FiniteAutomaton(transitions, A, {A}) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def test_union(): a,", "FiniteAutomaton({(A, a): B}, A, {B}) C, D = State('C'), State('D') fa_2 = FiniteAutomaton({(C,", "D, G]) fa.remove_unreachable_states() assert len(fa.states) == 6 def test_minimize(): a = Symbol('a') b", "State('F') G = State('G') H = State('H') transitions = { (A, a): {G},", "D}, C, {D}) fa_union = FiniteAutomaton.union(fa_1, fa_2) assert not fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a'))", "b): {A}, (F, a): {B}, (F, b): {C}, (G, a): {G}, (G, b):", "State('B') transitions = { (A, a): {A}, (A, b): {B}, } fa =", "fa.remove_unreachable_states() assert len(fa.states) == 6 def test_minimize(): a = Symbol('a') b = Symbol('b')", "b): {E}, (C, a): {C}, (C, b): {G}, (D, a): {A}, (D, b):", "= State('G') H = State('H') transitions = { (A, a): {G}, (A, b):", "State('B') C = State('C') D = State('D') E = State('E') F = State('F')", "Symbol('a') A = State('A') B = State('B') fa_1 = FiniteAutomaton({(A, a): B}, A,", "= State('B') C = State('C') D = State('D') E = State('E') F =", "b): {C}, (G, a): {G}, (G, b): {F}, (H, a): {H}, (H, b):", "= State('C') D = State('D') fa_2 = FiniteAutomaton({(C, a): D}, C, {D}) fa_concat", "B = State('A'), State('B') transitions = { (A, a): {B}, (B, a): {A},", "assert fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa')) def test_complete(): a = Symbol('a') b = Symbol('b')", "{F}, (B, b): {E}, (C, a): {C}, (C, b): {G}, (D, a): {A},", "assert n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a = Symbol('a') b = Symbol('b')", "fa_concat.evaluate(Sentence('aaa')) def test_complete(): a = Symbol('a') b = Symbol('b') A = State('A') B", "} fa = FiniteAutomaton(transitions, A, [A, D, G]) fa.remove_unreachable_states() assert len(fa.states) == 6", "= State('D') E = State('E') F = State('F') G = State('G') H =", "= FiniteAutomaton(transitions, A, {A}) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def test_union():", "b): {B}, (B, a): {F}, (B, b): {E}, (C, a): {C}, (C, b):", "{G}, (A, b): {B}, (B, a): {F}, (B, b): {E}, (C, a): {C},", "fa = FiniteAutomaton(transitions, A, [A, D, G]) fa = fa.minimize() assert len(fa.states) ==", "test_minimize(): a = Symbol('a') b = Symbol('b') A = State('A') B = State('B')", "G = State('G') H = State('H') transitions = { (A, a): {G}, (A,", "A, [A, D, G]) fa.remove_unreachable_states() assert len(fa.states) == 6 def test_minimize(): a =", "from kleeneup import FiniteAutomaton, Sentence, State, Symbol def test_copy(): a, b = Symbol('a'),", "State('A'), State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C, D = State('C'),", "FiniteAutomaton(transitions, A, {B}) assert not fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa =", "= State('A'), State('B') 
transitions = { (A, a): {B}, (A, b): {A}, (B,", "A, {B}) fa.complete() def test_negate(): a = Symbol('a') b = Symbol('b') A =", "{ (A, a): {A}, (A, b): {B}, } fa = FiniteAutomaton(transitions, A, {B})", "fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa = fa.negate() assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab'))", "(C, b): {G}, (D, a): {A}, (D, b): {H}, (E, a): {E}, (E,", "assert not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa')) def test_complete(): a = Symbol('a')", "n_fa = fa.negate() assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a", "b = Symbol('a'), Symbol('b') A, B = State('A'), State('B') transitions = { (A,", "fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C, D = State('C'), State('D') fa_2", "b): D}, C, {D}) fa_union = FiniteAutomaton.union(fa_1, fa_2) assert not fa_1.evaluate(Sentence('b')) assert not", "{ (A, a): {G}, (A, b): {B}, (B, a): {F}, (B, b): {E},", "(A, b): {B}, } fa = FiniteAutomaton(transitions, A, {B}) fa.complete() def test_negate(): a", "Symbol('b') A = State('A') B = State('B') transitions = { (A, a): {A},", "{C}, (C, b): {G}, (D, a): {A}, (D, b): {H}, (E, a): {E},", "test_copy(): a, b = Symbol('a'), Symbol('b') A, B = State('A'), State('B') transitions =", "fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2) assert not fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert", "A, {A}) fa2 = fa1.copy() assert fa1 is not fa2 def test_evaluate(): a", "{A}, (F, a): {B}, (F, b): {C}, (G, a): {G}, (G, b): {F},", "= State('F') G = State('G') H = State('H') transitions = { (A, a):", "assert not fa_concat.evaluate(Sentence('aaa')) def test_complete(): a = Symbol('a') b = Symbol('b') A =", "State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C = State('C') D =", "F = State('F') G = State('G') H = State('H') transitions = { (A,", "def test_minimize(): a = Symbol('a') b = Symbol('b') A = State('A') B =", "a): B}, A, {B}) C = State('C') D = State('D') fa_2 = FiniteAutomaton({(C,", "a): {C}, (C, b): {G}, (D, a): {A}, (D, b): {H}, (E, a):", "= State('B') transitions = { (A, a): {A}, (A, b): {B}, } fa", "fa1 = FiniteAutomaton(transitions, A, {A}) fa2 = fa1.copy() assert fa1 is not fa2", "a): {E}, (E, b): {A}, (F, a): {B}, (F, b): {C}, (G, a):", "fa_union.evaluate(Sentence('ba')) def test_concatenate(): a = Symbol('a') A = State('A') B = State('B') fa_1", "(A, a): {A}, (A, b): {B}, } fa = FiniteAutomaton(transitions, A, {B}) assert", "a, b = Symbol('a'), Symbol('b') A, B = State('A'), State('B') transitions = {", "= { (A, a): {B}, (B, a): {A}, } fa = FiniteAutomaton(transitions, A,", "fa_union = FiniteAutomaton.union(fa_1, fa_2) assert not fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert", "= Symbol('a') b = Symbol('b') A = State('A') B = State('B') transitions =", "{H}, (E, a): {E}, (E, b): {A}, (F, a): {B}, (F, b): {C},", "assert len(fa.states) == 6 def test_minimize(): a = Symbol('a') b = Symbol('b') A", "(B, a): {F}, (B, b): {E}, (C, a): {C}, (C, b): {G}, (D,", "a): {G}, (G, b): {F}, (H, a): {H}, (H, b): {D}, } fa", "State('D') fa_2 = FiniteAutomaton({(C, b): D}, C, {D}) fa_union = FiniteAutomaton.union(fa_1, fa_2) assert", "b): {D}, } fa = FiniteAutomaton(transitions, A, [A, D, G]) 
fa = fa.minimize()", "a): {B}, (A, b): {A}, (B, a): {A}, (B, b): {B}, } fa1", "a): {H}, (H, b): {D}, } fa = FiniteAutomaton(transitions, A, [A, D, G])", "A = State('A') B = State('B') transitions = { (A, a): {A}, (A,", "Symbol('a') b = Symbol('b') A = State('A') B = State('B') transitions = {", "import FiniteAutomaton, Sentence, State, Symbol def test_copy(): a, b = Symbol('a'), Symbol('b') A,", "test_concatenate(): a = Symbol('a') A = State('A') B = State('B') fa_1 = FiniteAutomaton({(A,", "def test_negate(): a = Symbol('a') b = Symbol('b') A = State('A') B =", "(G, a): {G}, (G, b): {F}, (H, a): {H}, (H, b): {D}, }", "n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a = Symbol('a') b = Symbol('b') A", "Symbol('a') A, B = State('A'), State('B') transitions = { (A, a): {B}, (B,", "test_evaluate(): a = Symbol('a') A, B = State('A'), State('B') transitions = { (A,", "a): {A}, (D, b): {H}, (E, a): {E}, (E, b): {A}, (F, a):", "{F}, (H, a): {H}, (H, b): {D}, } fa = FiniteAutomaton(transitions, A, [A,", "{A}, (A, b): {B}, } fa = FiniteAutomaton(transitions, A, {B}) fa.complete() def test_negate():", "b = Symbol('b') A = State('A') B = State('B') C = State('C') D", "{B}, (B, a): {F}, (B, b): {E}, (C, a): {C}, (C, b): {G},", "= Symbol('b') A = State('A') B = State('B') transitions = { (A, a):", "{B}, (B, a): {A}, } fa = FiniteAutomaton(transitions, A, {A}) assert not fa.evaluate(Sentence('aaa'))", "{B}, (A, b): {A}, (B, a): {A}, (B, b): {B}, } fa1 =", "= Symbol('a') A = State('A') B = State('B') fa_1 = FiniteAutomaton({(A, a): B},", "= { (A, a): {A}, (A, b): {B}, } fa = FiniteAutomaton(transitions, A,", "assert fa.evaluate(Sentence('aa')) def test_union(): a, b = Symbol('a'), Symbol('b') A, B = State('A'),", "test_negate(): a = Symbol('a') b = Symbol('b') A = State('A') B = State('B')", "A, {B}) C = State('C') D = State('D') fa_2 = FiniteAutomaton({(C, a): D},", "= fa.negate() assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a =", "= State('A'), State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C, D =", "assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a = Symbol('a') b", "assert not fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba')) def test_concatenate(): a = Symbol('a') A =", "State('A'), State('B') transitions = { (A, a): {B}, (B, a): {A}, } fa", "transitions = { (A, a): {B}, (B, a): {A}, } fa = FiniteAutomaton(transitions,", "{A}, (B, b): {B}, } fa1 = FiniteAutomaton(transitions, A, {A}) fa2 = fa1.copy()", "transitions = { (A, a): {A}, (A, b): {B}, } fa = FiniteAutomaton(transitions,", "fa_2) assert not fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert not", "State('E') F = State('F') G = State('G') H = State('H') transitions = {", "not fa_concat.evaluate(Sentence('aaa')) def test_complete(): a = Symbol('a') b = Symbol('b') A = State('A')", "def test_union(): a, b = Symbol('a'), Symbol('b') A, B = State('A'), State('B') fa_1", "{A}, } fa = FiniteAutomaton(transitions, A, {A}) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert", "FiniteAutomaton(transitions, A, {A}) fa2 = fa1.copy() assert fa1 is not fa2 def test_evaluate():", "FiniteAutomaton(transitions, A, {B}) fa.complete() def 
test_negate(): a = Symbol('a') b = Symbol('b') A", "State('G') H = State('H') transitions = { (A, a): {G}, (A, b): {B},", "(F, a): {B}, (F, b): {C}, (G, a): {G}, (G, b): {F}, (H,", "{E}, (E, b): {A}, (F, a): {B}, (F, b): {C}, (G, a): {G},", "D = State('D') fa_2 = FiniteAutomaton({(C, a): D}, C, {D}) fa_concat = FiniteAutomaton.concatenate(fa_1,", "= FiniteAutomaton(transitions, A, {B}) fa.complete() def test_negate(): a = Symbol('a') b = Symbol('b')", "a): B}, A, {B}) C, D = State('C'), State('D') fa_2 = FiniteAutomaton({(C, b):", "D = State('D') E = State('E') F = State('F') G = State('G') H", "= { (A, a): {B}, (A, b): {A}, (B, a): {A}, (B, b):", "= FiniteAutomaton(transitions, A, {B}) assert not fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa", "a): {G}, (A, b): {B}, (B, a): {F}, (B, b): {E}, (C, a):", "Symbol def test_copy(): a, b = Symbol('a'), Symbol('b') A, B = State('A'), State('B')", "fa = FiniteAutomaton(transitions, A, {B}) assert not fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab'))", "def test_copy(): a, b = Symbol('a'), Symbol('b') A, B = State('A'), State('B') transitions", "A, B = State('A'), State('B') transitions = { (A, a): {B}, (B, a):", "assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa = fa.negate() assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert", "B = State('B') C = State('C') D = State('D') E = State('E') F", "assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba'))", "fa = FiniteAutomaton(transitions, A, {A}) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def", "b): {B}, } fa = FiniteAutomaton(transitions, A, {B}) fa.complete() def test_negate(): a =", "= State('D') fa_2 = FiniteAutomaton({(C, a): D}, C, {D}) fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2)", "(C, a): {C}, (C, b): {G}, (D, a): {A}, (D, b): {H}, (E,", "not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa')) def test_complete(): a = Symbol('a') b", "State, Symbol def test_copy(): a, b = Symbol('a'), Symbol('b') A, B = State('A'),", "a): {B}, (F, b): {C}, (G, a): {G}, (G, b): {F}, (H, a):", "(B, a): {A}, (B, b): {B}, } fa1 = FiniteAutomaton(transitions, A, {A}) fa2", "assert not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab'))", "C, D = State('C'), State('D') fa_2 = FiniteAutomaton({(C, b): D}, C, {D}) fa_union", "fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba')) def test_concatenate(): a = Symbol('a') A = State('A') B", "assert not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a = Symbol('a') b = Symbol('b') A =", "A, B = State('A'), State('B') transitions = { (A, a): {B}, (A, b):", "assert not fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa')) def test_complete():", "test_union(): a, b = Symbol('a'), Symbol('b') A, B = State('A'), State('B') fa_1 =", "{H}, (H, b): {D}, } fa = FiniteAutomaton(transitions, A, [A, D, G]) 
fa.remove_unreachable_states()", "not fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence('')) assert", "fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa = fa.negate() assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa'))", "b): {B}, } fa1 = FiniteAutomaton(transitions, A, {A}) fa2 = fa1.copy() assert fa1", "State('D') E = State('E') F = State('F') G = State('G') H = State('H')", "(A, b): {B}, (B, a): {F}, (B, b): {E}, (C, a): {C}, (C,", "B = State('A'), State('B') transitions = { (A, a): {B}, (A, b): {A},", "FiniteAutomaton, Sentence, State, Symbol def test_copy(): a, b = Symbol('a'), Symbol('b') A, B", "assert not fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba')) def test_concatenate(): a =", "n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a = Symbol('a') b =", "not fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa')) def test_complete(): a", "A, {B}) C, D = State('C'), State('D') fa_2 = FiniteAutomaton({(C, b): D}, C,", "len(fa.states) == 6 def test_minimize(): a = Symbol('a') b = Symbol('b') A =", "{B}, } fa = FiniteAutomaton(transitions, A, {B}) fa.complete() def test_negate(): a = Symbol('a')", "{B}, } fa1 = FiniteAutomaton(transitions, A, {A}) fa2 = fa1.copy() assert fa1 is", "State('A') B = State('B') transitions = { (A, a): {A}, (A, b): {B},", "(B, b): {B}, } fa1 = FiniteAutomaton(transitions, A, {A}) fa2 = fa1.copy() assert", "State('C'), State('D') fa_2 = FiniteAutomaton({(C, b): D}, C, {D}) fa_union = FiniteAutomaton.union(fa_1, fa_2)", "def test_concatenate(): a = Symbol('a') A = State('A') B = State('B') fa_1 =", "= FiniteAutomaton({(C, a): D}, C, {D}) fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2) assert not fa_concat.evaluate(Sentence(''))", "Symbol('a') b = Symbol('b') A = State('A') B = State('B') C = State('C')", "{G}, (G, b): {F}, (H, a): {H}, (H, b): {D}, } fa =", "= State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C = State('C') D", "H = State('H') transitions = { (A, a): {G}, (A, b): {B}, (B,", "b): {A}, (B, a): {A}, (B, b): {B}, } fa1 = FiniteAutomaton(transitions, A,", "not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa = fa.negate() assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert not", "= Symbol('a'), Symbol('b') A, B = State('A'), State('B') transitions = { (A, a):", "assert fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba')) def test_concatenate():", "fa1 is not fa2 def test_evaluate(): a = Symbol('a') A, B = State('A'),", "{A}, (A, b): {B}, } fa = FiniteAutomaton(transitions, A, {B}) assert not fa.evaluate(Sentence(''))", "fa_2 = FiniteAutomaton({(C, b): D}, C, {D}) fa_union = FiniteAutomaton.union(fa_1, fa_2) assert not", "assert not fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa = fa.negate() assert n_fa.evaluate(Sentence(''))", "G]) fa.remove_unreachable_states() assert len(fa.states) == 6 def test_minimize(): a = Symbol('a') b =", "transitions = 
{ (A, a): {B}, (A, b): {A}, (B, a): {A}, (B,", "not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def test_union(): a, b = Symbol('a'), Symbol('b')", "fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def test_union(): a, b = Symbol('a'), Symbol('b') A, B =", "} fa = FiniteAutomaton(transitions, A, {B}) fa.complete() def test_negate(): a = Symbol('a') b", "fa = FiniteAutomaton(transitions, A, {B}) fa.complete() def test_negate(): a = Symbol('a') b =", "fa_2 = FiniteAutomaton({(C, a): D}, C, {D}) fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2) assert not", "B}, A, {B}) C = State('C') D = State('D') fa_2 = FiniteAutomaton({(C, a):", "fa2 = fa1.copy() assert fa1 is not fa2 def test_evaluate(): a = Symbol('a')", "State('A') B = State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C =", "FiniteAutomaton(transitions, A, [A, D, G]) fa.remove_unreachable_states() assert len(fa.states) == 6 def test_minimize(): a", "not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a = Symbol('a') b = Symbol('b') A = State('A')", "C, {D}) fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2) assert not fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a')) assert", "(A, b): {B}, } fa = FiniteAutomaton(transitions, A, {B}) assert not fa.evaluate(Sentence('')) assert", "(H, b): {D}, } fa = FiniteAutomaton(transitions, A, [A, D, G]) fa =", "fa_2) assert not fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa')) def", "State('A') B = State('B') C = State('C') D = State('D') E = State('E')", "D = State('C'), State('D') fa_2 = FiniteAutomaton({(C, b): D}, C, {D}) fa_union =", "(A, a): {G}, (A, b): {B}, (B, a): {F}, (B, b): {E}, (C,", "fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C = State('C') D = State('D')", "State('A'), State('B') transitions = { (A, a): {B}, (A, b): {A}, (B, a):", "fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa')) def test_complete(): a =", "fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence('')) assert not", "assert fa1 is not fa2 def test_evaluate(): a = Symbol('a') A, B =", "{B}, (F, b): {C}, (G, a): {G}, (G, b): {F}, (H, a): {H},", "= State('A'), State('B') transitions = { (A, a): {B}, (B, a): {A}, }", "fa = FiniteAutomaton(transitions, A, [A, D, G]) fa.remove_unreachable_states() assert len(fa.states) == 6 def", "a): D}, C, {D}) fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2) assert not fa_concat.evaluate(Sentence('')) assert not", "= FiniteAutomaton({(A, a): B}, A, {B}) C = State('C') D = State('D') fa_2", "kleeneup import FiniteAutomaton, Sentence, State, Symbol def test_copy(): a, b = Symbol('a'), Symbol('b')", "C = State('C') D = State('D') fa_2 = FiniteAutomaton({(C, a): D}, C, {D})", "fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def test_union(): a, b = Symbol('a'), Symbol('b') A,", "{D}, } fa = FiniteAutomaton(transitions, A, [A, D, G]) fa = fa.minimize() assert", "transitions = { (A, a): {G}, (A, b): {B}, (B, a): {F}, (B,", "fa.evaluate(Sentence('aa')) def test_union(): a, b = Symbol('a'), Symbol('b') A, B = State('A'), State('B')", "a): {A}, (A, b): {B}, } fa = 
FiniteAutomaton(transitions, A, {B}) fa.complete() def", "} fa = FiniteAutomaton(transitions, A, {B}) assert not fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa')) assert", "a = Symbol('a') b = Symbol('b') A = State('A') B = State('B') C", "fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa')) def test_complete(): a = Symbol('a') b = Symbol('b') A", "(A, a): {A}, (A, b): {B}, } fa = FiniteAutomaton(transitions, A, {B}) fa.complete()", "A = State('A') B = State('B') C = State('C') D = State('D') E", "assert not fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence(''))", "(E, a): {E}, (E, b): {A}, (F, a): {B}, (F, b): {C}, (G,", "def test_remove_unreachable(): a = Symbol('a') b = Symbol('b') A = State('A') B =", "(B, a): {A}, } fa = FiniteAutomaton(transitions, A, {A}) assert not fa.evaluate(Sentence('aaa')) assert", "fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba')) def", "B = State('B') transitions = { (A, a): {A}, (A, b): {B}, }", "{A}) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def test_union(): a, b =", "a): {A}, (B, b): {B}, } fa1 = FiniteAutomaton(transitions, A, {A}) fa2 =", "test_remove_unreachable(): a = Symbol('a') b = Symbol('b') A = State('A') B = State('B')", "fa2 def test_evaluate(): a = Symbol('a') A, B = State('A'), State('B') transitions =", "{C}, (G, a): {G}, (G, b): {F}, (H, a): {H}, (H, b): {D},", "= State('C'), State('D') fa_2 = FiniteAutomaton({(C, b): D}, C, {D}) fa_union = FiniteAutomaton.union(fa_1,", "fa1.copy() assert fa1 is not fa2 def test_evaluate(): a = Symbol('a') A, B", "= FiniteAutomaton.concatenate(fa_1, fa_2) assert not fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert not", "(A, a): {B}, (B, a): {A}, } fa = FiniteAutomaton(transitions, A, {A}) assert", "= FiniteAutomaton(transitions, A, [A, D, G]) fa = fa.minimize() assert len(fa.states) == 3", "b): {G}, (D, a): {A}, (D, b): {H}, (E, a): {E}, (E, b):", "{D}) fa_concat = FiniteAutomaton.concatenate(fa_1, fa_2) assert not fa_concat.evaluate(Sentence('')) assert not fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa'))", "Symbol('b') A = State('A') B = State('B') C = State('C') D = State('D')", "a = Symbol('a') A = State('A') B = State('B') fa_1 = FiniteAutomaton({(A, a):", "a, b = Symbol('a'), Symbol('b') A, B = State('A'), State('B') fa_1 = FiniteAutomaton({(A,", "(A, a): {B}, (A, b): {A}, (B, a): {A}, (B, b): {B}, }", "A, {B}) assert not fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('ab')) n_fa = fa.negate()", "fa.negate() assert n_fa.evaluate(Sentence('')) assert n_fa.evaluate(Sentence('aaa')) assert not n_fa.evaluate(Sentence('ab')) def test_remove_unreachable(): a = Symbol('a')", "FiniteAutomaton({(C, b): D}, C, {D}) fa_union = FiniteAutomaton.union(fa_1, fa_2) assert not fa_1.evaluate(Sentence('b')) assert", "B = State('A'), State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C, D", "= FiniteAutomaton(transitions, A, [A, D, G]) fa.remove_unreachable_states() assert len(fa.states) == 6 def test_minimize():", "def test_complete(): a = Symbol('a') b = Symbol('b') A 
= State('A') B =", "= FiniteAutomaton({(C, b): D}, C, {D}) fa_union = FiniteAutomaton.union(fa_1, fa_2) assert not fa_1.evaluate(Sentence('b'))", "State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C, D = State('C'), State('D')", "b): {H}, (E, a): {E}, (E, b): {A}, (F, a): {B}, (F, b):", "A, {A}) assert not fa.evaluate(Sentence('aaa')) assert fa.evaluate(Sentence('')) assert fa.evaluate(Sentence('aa')) def test_union(): a, b", "a): {F}, (B, b): {E}, (C, a): {C}, (C, b): {G}, (D, a):", "{B}) C = State('C') D = State('D') fa_2 = FiniteAutomaton({(C, a): D}, C,", "not fa_2.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('a')) assert fa_union.evaluate(Sentence('b')) assert not fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab')) assert", "== 6 def test_minimize(): a = Symbol('a') b = Symbol('b') A = State('A')", "B}, A, {B}) C, D = State('C'), State('D') fa_2 = FiniteAutomaton({(C, b): D},", "{D}, } fa = FiniteAutomaton(transitions, A, [A, D, G]) fa.remove_unreachable_states() assert len(fa.states) ==", "= State('A') B = State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C", "not fa2 def test_evaluate(): a = Symbol('a') A, B = State('A'), State('B') transitions", "is not fa2 def test_evaluate(): a = Symbol('a') A, B = State('A'), State('B')", "{ (A, a): {B}, (A, b): {A}, (B, a): {A}, (B, b): {B},", "{B}, } fa = FiniteAutomaton(transitions, A, {B}) assert not fa.evaluate(Sentence('')) assert not fa.evaluate(Sentence('aaa'))", "a): {B}, (B, a): {A}, } fa = FiniteAutomaton(transitions, A, {A}) assert not", "B = State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C = State('C')", "Symbol('a'), Symbol('b') A, B = State('A'), State('B') fa_1 = FiniteAutomaton({(A, a): B}, A,", "fa_concat.evaluate(Sentence('a')) assert fa_concat.evaluate(Sentence('aa')) assert not fa_concat.evaluate(Sentence('aaa')) def test_complete(): a = Symbol('a') b =", "{A}, (D, b): {H}, (E, a): {E}, (E, b): {A}, (F, a): {B},", "C, {D}) fa_union = FiniteAutomaton.union(fa_1, fa_2) assert not fa_1.evaluate(Sentence('b')) assert not fa_2.evaluate(Sentence('a')) assert", "fa_union.evaluate(Sentence('')) assert not fa_union.evaluate(Sentence('ab')) assert not fa_union.evaluate(Sentence('ba')) def test_concatenate(): a = Symbol('a') A", "(G, b): {F}, (H, a): {H}, (H, b): {D}, } fa = FiniteAutomaton(transitions,", "assert not fa_union.evaluate(Sentence('ba')) def test_concatenate(): a = Symbol('a') A = State('A') B =", "(H, a): {H}, (H, b): {D}, } fa = FiniteAutomaton(transitions, A, [A, D,", "(A, b): {A}, (B, a): {A}, (B, b): {B}, } fa1 = FiniteAutomaton(transitions,", "State('C') D = State('D') E = State('E') F = State('F') G = State('G')", "A, B = State('A'), State('B') fa_1 = FiniteAutomaton({(A, a): B}, A, {B}) C,", "= State('C') D = State('D') E = State('E') F = State('F') G =" ]
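For readers without the kleeneup package on hand, here is a minimal, self-contained sketch (not kleeneup's actual code) of the two pieces of the API that every test above leans on: the transition-dictionary constructor and evaluate(). The MiniNFA name is invented for illustration, and plain strings stand in for Symbol and Sentence.

class MiniNFA:
    def __init__(self, transitions, initial_state, accept_states):
        # Normalize values to sets so both {(A, a): B} and {(A, a): {B}}
        # are accepted, mirroring how the tests pass either form.
        self.transitions = {
            key: set(value) if isinstance(value, (set, frozenset, list, tuple)) else {value}
            for key, value in transitions.items()
        }
        self.initial_state = initial_state
        self.accept_states = set(accept_states)

    def evaluate(self, sentence):
        # Subset simulation: track every state the NFA could currently be in;
        # accept if any accepting state is reachable after the last symbol.
        current = {self.initial_state}
        for symbol in sentence:
            current = {
                nxt
                for state in current
                for nxt in self.transitions.get((state, symbol), set())
            }
        return bool(current & self.accept_states)


if __name__ == '__main__':
    # Mirrors test_evaluate: flipping between A and B on 'a' accepts exactly
    # the even-length sentences over {a}.
    fa = MiniNFA({('A', 'a'): {'B'}, ('B', 'a'): {'A'}}, 'A', {'A'})
    assert fa.evaluate('') and fa.evaluate('aa') and not fa.evaluate('aaa')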
[ "0: period = period + 1 return self.income_statements[period]['incomeTaxExpense'] / self.income_statements[period]['incomeBeforeTax'] def say(self, contet):", "of {} From {}'.format(self.profile[0]['companyName'], url)) self.say('Loading Income statements...') self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker, apikey))", "= retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Statements of Cash Flow...') self.statements_of_cash_flow = retrieve_from('{}/api/v3/cash-flow-statement/{}?apikey={}'.format(url, ticker,", "return self.profile[0]['beta'] @property def share_outstanding(self): return self.quote[0]['sharesOutstanding'] @property def tax_rate(self): period = 0", "module...') self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey)) print('Rerieving data of {} From {}'.format(self.profile[0]['companyName'], url))", "import retrieve_from # 3b732d31142b79d4e8d659612f55181a class API_Data: def __init__(self, ticker, apikey='2bd6ce6f77da18e51c3e254ed9060702', url='https://financialmodelingprep.com', verbose=True): self.url", "self.statements_of_cash_flow = retrieve_from('{}/api/v3/cash-flow-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Quote...') self.quote = retrieve_from('{}/api/v3/quote/{}?apikey={}'.format(url, ticker, apikey)) def", "Balance Sheet...') self.balance_sheets = retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Statements of Cash Flow...') self.statements_of_cash_flow", "tax_rate(self): period = 0 while self.income_statements[period]['incomeTaxExpense'] < 0: period = period + 1", "if self.verbose: print('[DATA MODULE] {}'.format(contet)) if __name__ == \"__main__\": data = API_Data('F') print(data.net_receivable(1))", "return self.balance_sheets[period]['cashAndCashEquivalents'] def stock_price(self, period=0): return self.quote[0]['price'] @property def company_name(self): return self.profile[0]['companyName'] @property", "self.say('Initiate data module...') self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey)) print('Rerieving data of {} From", "verbose=True): self.url = url self.ticker = ticker self.verbose = verbose self.say('Initiate data module...')", "verbose self.say('Initiate data module...') self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey)) print('Rerieving data of {}", "self.income_statements[period]['incomeTaxExpense'] / self.income_statements[period]['incomeBeforeTax'] def say(self, contet): if self.verbose: print('[DATA MODULE] {}'.format(contet)) if __name__", "ticker, apikey)) print('Rerieving data of {} From {}'.format(self.profile[0]['companyName'], url)) self.say('Loading Income statements...') self.income_statements", "period=0): return self.balance_sheets[period]['cashAndCashEquivalents'] def stock_price(self, period=0): return self.quote[0]['price'] @property def company_name(self): return self.profile[0]['companyName']", "statements...') self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Balance Sheet...') self.balance_sheets = retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker,", "def revenue(self, period=0): return self.income_statements[period]['revenue'] def gross_profit_ratio(self, period=0): return 
self.income_statements[period]['grossProfitRatio'] def operating_cash_flow(self, period=0):", "self.balance_sheets[period]['accountPayables'] def inventory(self, period=0): return self.balance_sheets[period]['inventory'] def revenue(self, period=0): return self.income_statements[period]['revenue'] def gross_profit_ratio(self,", "def depreciation_and_amortization(self, period=0): return self.statements_of_cash_flow[period]['depreciationAndAmortization'] def working_capital(self, period=0): return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities'] def", "self.balance_sheets[period]['totalLiabilities'] def ebitda(self, period=0): return self.income_statements[period]['ebitda'] def depreciation_and_amortization(self, period=0): return self.statements_of_cash_flow[period]['depreciationAndAmortization'] def working_capital(self,", "period=0): return self.statements_of_cash_flow[period]['capitalExpenditure'] def cash_and_cash_equivalent(self, period=0): return self.balance_sheets[period]['cashAndCashEquivalents'] def stock_price(self, period=0): return self.quote[0]['price']", "return self.income_statements[period]['grossProfitRatio'] def operating_cash_flow(self, period=0): return self.statements_of_cash_flow[period]['operatingCashFlow'] def total_liabilities(self, period=0): return self.balance_sheets[period]['totalLiabilities'] def", "return self.statements_of_cash_flow[period]['depreciationAndAmortization'] def working_capital(self, period=0): return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities'] def capital_expenditure(self, period=0): return", "self.say('Loading Income statements...') self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Balance Sheet...') self.balance_sheets =", "url self.ticker = ticker self.verbose = verbose self.say('Initiate data module...') self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url,", "= period + 1 return self.income_statements[period]['incomeTaxExpense'] / self.income_statements[period]['incomeBeforeTax'] def say(self, contet): if self.verbose:", "def inventory(self, period=0): return self.balance_sheets[period]['inventory'] def revenue(self, period=0): return self.income_statements[period]['revenue'] def gross_profit_ratio(self, period=0):", "= ticker self.verbose = verbose self.say('Initiate data module...') self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey))", "ticker, apikey='2bd6ce6f77da18e51c3e254ed9060702', url='https://financialmodelingprep.com', verbose=True): self.url = url self.ticker = ticker self.verbose = verbose", "self.verbose = verbose self.say('Initiate data module...') self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey)) print('Rerieving data", "net_receivable(self, period=0): return self.balance_sheets[period]['netReceivables'] def account_payable(self, period=0): return self.balance_sheets[period]['accountPayables'] def inventory(self, period=0): return", "period=0): return self.balance_sheets[period]['netReceivables'] def account_payable(self, period=0): return self.balance_sheets[period]['accountPayables'] def inventory(self, period=0): return self.balance_sheets[period]['inventory']", "return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities'] def capital_expenditure(self, period=0): return 
self.statements_of_cash_flow[period]['capitalExpenditure'] def cash_and_cash_equivalent(self, period=0): return", "retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Statements of Cash Flow...') self.statements_of_cash_flow = retrieve_from('{}/api/v3/cash-flow-statement/{}?apikey={}'.format(url, ticker, apikey))", "apikey)) self.say('Loading Balance Sheet...') self.balance_sheets = retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Statements of Cash", "def say(self, contet): if self.verbose: print('[DATA MODULE] {}'.format(contet)) if __name__ == \"__main__\": data", "period = 0 while self.income_statements[period]['incomeTaxExpense'] < 0: period = period + 1 return", "Statements of Cash Flow...') self.statements_of_cash_flow = retrieve_from('{}/api/v3/cash-flow-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Quote...') self.quote =", "self.quote[0]['sharesOutstanding'] @property def tax_rate(self): period = 0 while self.income_statements[period]['incomeTaxExpense'] < 0: period =", "account_payable(self, period=0): return self.balance_sheets[period]['accountPayables'] def inventory(self, period=0): return self.balance_sheets[period]['inventory'] def revenue(self, period=0): return", "self.balance_sheets[period]['cashAndCashEquivalents'] def stock_price(self, period=0): return self.quote[0]['price'] @property def company_name(self): return self.profile[0]['companyName'] @property def", "data module...') self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey)) print('Rerieving data of {} From {}'.format(self.profile[0]['companyName'],", "API_Data: def __init__(self, ticker, apikey='2bd6ce6f77da18e51c3e254ed9060702', url='https://financialmodelingprep.com', verbose=True): self.url = url self.ticker = ticker", "= retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey)) print('Rerieving data of {} From {}'.format(self.profile[0]['companyName'], url)) self.say('Loading Income", "return self.income_statements[period]['incomeTaxExpense'] / self.income_statements[period]['incomeBeforeTax'] def say(self, contet): if self.verbose: print('[DATA MODULE] {}'.format(contet)) if", "from util import retrieve_from # 3b732d31142b79d4e8d659612f55181a class API_Data: def __init__(self, ticker, apikey='2bd6ce6f77da18e51c3e254ed9060702', url='https://financialmodelingprep.com',", "period=0): return self.statements_of_cash_flow[period]['depreciationAndAmortization'] def working_capital(self, period=0): return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities'] def capital_expenditure(self, period=0):", "print('Rerieving data of {} From {}'.format(self.profile[0]['companyName'], url)) self.say('Loading Income statements...') self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url,", "data of {} From {}'.format(self.profile[0]['companyName'], url)) self.say('Loading Income statements...') self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker,", "self.income_statements[period]['revenue'] def gross_profit_ratio(self, period=0): return self.income_statements[period]['grossProfitRatio'] def operating_cash_flow(self, period=0): return self.statements_of_cash_flow[period]['operatingCashFlow'] def total_liabilities(self,", "self.income_statements[period]['incomeTaxExpense'] < 0: period = period + 
1 return self.income_statements[period]['incomeTaxExpense'] / self.income_statements[period]['incomeBeforeTax'] def", "self.url = url self.ticker = ticker self.verbose = verbose self.say('Initiate data module...') self.profile", "self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey)) print('Rerieving data of {} From {}'.format(self.profile[0]['companyName'], url)) self.say('Loading", "def capital_expenditure(self, period=0): return self.statements_of_cash_flow[period]['capitalExpenditure'] def cash_and_cash_equivalent(self, period=0): return self.balance_sheets[period]['cashAndCashEquivalents'] def stock_price(self, period=0):", "def total_liabilities(self, period=0): return self.balance_sheets[period]['totalLiabilities'] def ebitda(self, period=0): return self.income_statements[period]['ebitda'] def depreciation_and_amortization(self, period=0):", "return self.statements_of_cash_flow[period]['operatingCashFlow'] def total_liabilities(self, period=0): return self.balance_sheets[period]['totalLiabilities'] def ebitda(self, period=0): return self.income_statements[period]['ebitda'] def", "retrieve_from('{}/api/v3/quote/{}?apikey={}'.format(url, ticker, apikey)) def net_receivable(self, period=0): return self.balance_sheets[period]['netReceivables'] def account_payable(self, period=0): return self.balance_sheets[period]['accountPayables']", "self.income_statements[period]['ebitda'] def depreciation_and_amortization(self, period=0): return self.statements_of_cash_flow[period]['depreciationAndAmortization'] def working_capital(self, period=0): return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities']", "self.balance_sheets[period]['inventory'] def revenue(self, period=0): return self.income_statements[period]['revenue'] def gross_profit_ratio(self, period=0): return self.income_statements[period]['grossProfitRatio'] def operating_cash_flow(self,", "ticker, apikey)) self.say('Loading Quote...') self.quote = retrieve_from('{}/api/v3/quote/{}?apikey={}'.format(url, ticker, apikey)) def net_receivable(self, period=0): return", "period=0): return self.balance_sheets[period]['accountPayables'] def inventory(self, period=0): return self.balance_sheets[period]['inventory'] def revenue(self, period=0): return self.income_statements[period]['revenue']", "period=0): return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities'] def capital_expenditure(self, period=0): return self.statements_of_cash_flow[period]['capitalExpenditure'] def cash_and_cash_equivalent(self, period=0):", "ebitda(self, period=0): return self.income_statements[period]['ebitda'] def depreciation_and_amortization(self, period=0): return self.statements_of_cash_flow[period]['depreciationAndAmortization'] def working_capital(self, period=0): return", "def share_outstanding(self): return self.quote[0]['sharesOutstanding'] @property def tax_rate(self): period = 0 while self.income_statements[period]['incomeTaxExpense'] <", "0 while self.income_statements[period]['incomeTaxExpense'] < 0: period = period + 1 return self.income_statements[period]['incomeTaxExpense'] /", "= verbose self.say('Initiate data module...') self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey)) print('Rerieving data of", "= retrieve_from('{}/api/v3/quote/{}?apikey={}'.format(url, ticker, apikey)) def net_receivable(self, period=0): return self.balance_sheets[period]['netReceivables'] def 
account_payable(self, period=0): return", "apikey)) def net_receivable(self, period=0): return self.balance_sheets[period]['netReceivables'] def account_payable(self, period=0): return self.balance_sheets[period]['accountPayables'] def inventory(self,", "self.say('Loading Balance Sheet...') self.balance_sheets = retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Statements of Cash Flow...')", "@property def beta(self): return self.profile[0]['beta'] @property def share_outstanding(self): return self.quote[0]['sharesOutstanding'] @property def tax_rate(self):", "@property def share_outstanding(self): return self.quote[0]['sharesOutstanding'] @property def tax_rate(self): period = 0 while self.income_statements[period]['incomeTaxExpense']", "working_capital(self, period=0): return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities'] def capital_expenditure(self, period=0): return self.statements_of_cash_flow[period]['capitalExpenditure'] def cash_and_cash_equivalent(self,", "self.balance_sheets[period]['netReceivables'] def account_payable(self, period=0): return self.balance_sheets[period]['accountPayables'] def inventory(self, period=0): return self.balance_sheets[period]['inventory'] def revenue(self,", "retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey)) print('Rerieving data of {} From {}'.format(self.profile[0]['companyName'], url)) self.say('Loading Income statements...')", "<filename>data.py from util import retrieve_from # 3b732d31142b79d4e8d659612f55181a class API_Data: def __init__(self, ticker, apikey='2bd6ce6f77da18e51c3e254ed9060702',", "+ 1 return self.income_statements[period]['incomeTaxExpense'] / self.income_statements[period]['incomeBeforeTax'] def say(self, contet): if self.verbose: print('[DATA MODULE]", "say(self, contet): if self.verbose: print('[DATA MODULE] {}'.format(contet)) if __name__ == \"__main__\": data =", "beta(self): return self.profile[0]['beta'] @property def share_outstanding(self): return self.quote[0]['sharesOutstanding'] @property def tax_rate(self): period =", "{}'.format(self.profile[0]['companyName'], url)) self.say('Loading Income statements...') self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Balance Sheet...')", "return self.balance_sheets[period]['netReceivables'] def account_payable(self, period=0): return self.balance_sheets[period]['accountPayables'] def inventory(self, period=0): return self.balance_sheets[period]['inventory'] def", "period=0): return self.income_statements[period]['revenue'] def gross_profit_ratio(self, period=0): return self.income_statements[period]['grossProfitRatio'] def operating_cash_flow(self, period=0): return self.statements_of_cash_flow[period]['operatingCashFlow']", "def ebitda(self, period=0): return self.income_statements[period]['ebitda'] def depreciation_and_amortization(self, period=0): return self.statements_of_cash_flow[period]['depreciationAndAmortization'] def working_capital(self, period=0):", "depreciation_and_amortization(self, period=0): return self.statements_of_cash_flow[period]['depreciationAndAmortization'] def working_capital(self, period=0): return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities'] def capital_expenditure(self,", "capital_expenditure(self, period=0): return 
self.statements_of_cash_flow[period]['capitalExpenditure'] def cash_and_cash_equivalent(self, period=0): return self.balance_sheets[period]['cashAndCashEquivalents'] def stock_price(self, period=0): return", "def gross_profit_ratio(self, period=0): return self.income_statements[period]['grossProfitRatio'] def operating_cash_flow(self, period=0): return self.statements_of_cash_flow[period]['operatingCashFlow'] def total_liabilities(self, period=0):", "self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities'] def capital_expenditure(self, period=0): return self.statements_of_cash_flow[period]['capitalExpenditure'] def cash_and_cash_equivalent(self, period=0): return self.balance_sheets[period]['cashAndCashEquivalents']", "self.balance_sheets = retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Statements of Cash Flow...') self.statements_of_cash_flow = retrieve_from('{}/api/v3/cash-flow-statement/{}?apikey={}'.format(url,", "total_liabilities(self, period=0): return self.balance_sheets[period]['totalLiabilities'] def ebitda(self, period=0): return self.income_statements[period]['ebitda'] def depreciation_and_amortization(self, period=0): return", "period = period + 1 return self.income_statements[period]['incomeTaxExpense'] / self.income_statements[period]['incomeBeforeTax'] def say(self, contet): if", "period=0): return self.income_statements[period]['grossProfitRatio'] def operating_cash_flow(self, period=0): return self.statements_of_cash_flow[period]['operatingCashFlow'] def total_liabilities(self, period=0): return self.balance_sheets[period]['totalLiabilities']", "self.balance_sheets[period]['totalCurrentLiabilities'] def capital_expenditure(self, period=0): return self.statements_of_cash_flow[period]['capitalExpenditure'] def cash_and_cash_equivalent(self, period=0): return self.balance_sheets[period]['cashAndCashEquivalents'] def stock_price(self,", "= url self.ticker = ticker self.verbose = verbose self.say('Initiate data module...') self.profile =", "return self.balance_sheets[period]['totalLiabilities'] def ebitda(self, period=0): return self.income_statements[period]['ebitda'] def depreciation_and_amortization(self, period=0): return self.statements_of_cash_flow[period]['depreciationAndAmortization'] def", "{} From {}'.format(self.profile[0]['companyName'], url)) self.say('Loading Income statements...') self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading", "@property def company_name(self): return self.profile[0]['companyName'] @property def beta(self): return self.profile[0]['beta'] @property def share_outstanding(self):", "self.profile[0]['beta'] @property def share_outstanding(self): return self.quote[0]['sharesOutstanding'] @property def tax_rate(self): period = 0 while", "retrieve_from # 3b732d31142b79d4e8d659612f55181a class API_Data: def __init__(self, ticker, apikey='2bd6ce6f77da18e51c3e254ed9060702', url='https://financialmodelingprep.com', verbose=True): self.url =", "period=0): return self.quote[0]['price'] @property def company_name(self): return self.profile[0]['companyName'] @property def beta(self): return self.profile[0]['beta']", "inventory(self, period=0): return self.balance_sheets[period]['inventory'] def revenue(self, period=0): return self.income_statements[period]['revenue'] def gross_profit_ratio(self, period=0): return", "__init__(self, ticker, 
apikey='2bd6ce6f77da18e51c3e254ed9060702', url='https://financialmodelingprep.com', verbose=True): self.url = url self.ticker = ticker self.verbose =", "return self.profile[0]['companyName'] @property def beta(self): return self.profile[0]['beta'] @property def share_outstanding(self): return self.quote[0]['sharesOutstanding'] @property", "def account_payable(self, period=0): return self.balance_sheets[period]['accountPayables'] def inventory(self, period=0): return self.balance_sheets[period]['inventory'] def revenue(self, period=0):", "self.say('Loading Statements of Cash Flow...') self.statements_of_cash_flow = retrieve_from('{}/api/v3/cash-flow-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Quote...') self.quote", "of Cash Flow...') self.statements_of_cash_flow = retrieve_from('{}/api/v3/cash-flow-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Quote...') self.quote = retrieve_from('{}/api/v3/quote/{}?apikey={}'.format(url,", "= retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Balance Sheet...') self.balance_sheets = retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading", "period=0): return self.balance_sheets[period]['totalLiabilities'] def ebitda(self, period=0): return self.income_statements[period]['ebitda'] def depreciation_and_amortization(self, period=0): return self.statements_of_cash_flow[period]['depreciationAndAmortization']", "gross_profit_ratio(self, period=0): return self.income_statements[period]['grossProfitRatio'] def operating_cash_flow(self, period=0): return self.statements_of_cash_flow[period]['operatingCashFlow'] def total_liabilities(self, period=0): return", "period=0): return self.statements_of_cash_flow[period]['operatingCashFlow'] def total_liabilities(self, period=0): return self.balance_sheets[period]['totalLiabilities'] def ebitda(self, period=0): return self.income_statements[period]['ebitda']", "self.quote[0]['price'] @property def company_name(self): return self.profile[0]['companyName'] @property def beta(self): return self.profile[0]['beta'] @property def", "self.income_statements[period]['grossProfitRatio'] def operating_cash_flow(self, period=0): return self.statements_of_cash_flow[period]['operatingCashFlow'] def total_liabilities(self, period=0): return self.balance_sheets[period]['totalLiabilities'] def ebitda(self,", "self.ticker = ticker self.verbose = verbose self.say('Initiate data module...') self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker,", "def cash_and_cash_equivalent(self, period=0): return self.balance_sheets[period]['cashAndCashEquivalents'] def stock_price(self, period=0): return self.quote[0]['price'] @property def company_name(self):", "util import retrieve_from # 3b732d31142b79d4e8d659612f55181a class API_Data: def __init__(self, ticker, apikey='2bd6ce6f77da18e51c3e254ed9060702', url='https://financialmodelingprep.com', verbose=True):", "From {}'.format(self.profile[0]['companyName'], url)) self.say('Loading Income statements...') self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker, apikey)) self.say('Loading Balance", "return self.statements_of_cash_flow[period]['capitalExpenditure'] def cash_and_cash_equivalent(self, period=0): return self.balance_sheets[period]['cashAndCashEquivalents'] def stock_price(self, period=0): return self.quote[0]['price'] @property", "share_outstanding(self): return 
# 3b732d31142b79d4e8d659612f55181a
class API_Data:
    def __init__(self, ticker, apikey='2bd6ce6f77da18e51c3e254ed9060702', url='https://financialmodelingprep.com', verbose=True):
        self.url = url
        self.ticker = ticker
        self.verbose = verbose
        self.say('Initiating data module...')
        self.profile = retrieve_from('{}/api/v3/profile/{}?apikey={}'.format(url, ticker, apikey))
        print('Retrieving data of {} from {}'.format(self.profile[0]['companyName'], url))
        self.say('Loading Income statements...')
        self.income_statements = retrieve_from('{}/api/v3/income-statement/{}?apikey={}'.format(url, ticker, apikey))
        self.say('Loading Balance Sheet...')
        self.balance_sheets = retrieve_from('{}/api/v3/balance-sheet-statement/{}?apikey={}'.format(url, ticker, apikey))
        self.say('Loading Statements of Cash Flow...')
        self.statements_of_cash_flow = retrieve_from('{}/api/v3/cash-flow-statement/{}?apikey={}'.format(url, ticker, apikey))
        self.say('Loading Quote...')
        self.quote = retrieve_from('{}/api/v3/quote/{}?apikey={}'.format(url, ticker, apikey))

    # Balance-sheet items; period=0 is the most recent reporting period.
    def net_receivable(self, period=0):
        return self.balance_sheets[period]['netReceivables']

    def account_payable(self, period=0):
        return self.balance_sheets[period]['accountPayables']

    def inventory(self, period=0):
        return self.balance_sheets[period]['inventory']

    # Income-statement items.
    def revenue(self, period=0):
        return self.income_statements[period]['revenue']

    def gross_profit_ratio(self, period=0):
        return self.income_statements[period]['grossProfitRatio']

    def operating_cash_flow(self, period=0):
        return self.statements_of_cash_flow[period]['operatingCashFlow']

    def total_liabilities(self, period=0):
        return self.balance_sheets[period]['totalLiabilities']

    def ebitda(self, period=0):
        return self.income_statements[period]['ebitda']

    def depreciation_and_amortization(self, period=0):
        return self.statements_of_cash_flow[period]['depreciationAndAmortization']

    def working_capital(self, period=0):
        return self.balance_sheets[period]['totalCurrentAssets'] - self.balance_sheets[period]['totalCurrentLiabilities']

    def capital_expenditure(self, period=0):
        return self.statements_of_cash_flow[period]['capitalExpenditure']

    def cash_and_cash_equivalent(self, period=0):
        return self.balance_sheets[period]['cashAndCashEquivalents']

    def stock_price(self, period=0):
        return self.quote[0]['price']

    @property
    def company_name(self):
        return self.profile[0]['companyName']

    @property
    def beta(self):
        return self.profile[0]['beta']

    @property
    def share_outstanding(self):
        return self.quote[0]['sharesOutstanding']

    @property
    def tax_rate(self):
        # Skip periods with negative tax expense (tax credits) to get a usable effective rate.
        period = 0
        while self.income_statements[period]['incomeTaxExpense'] < 0:
            period = period + 1
        return self.income_statements[period]['incomeTaxExpense'] / self.income_statements[period]['incomeBeforeTax']

    def say(self, content):
        if self.verbose:
            print('[DATA MODULE] {}'.format(content))


if __name__ == "__main__":
    data = API_Data('F')
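# NOTE: retrieve_from() is used above but not defined in this excerpt. A minimal
# sketch of a compatible helper, assuming the FMP endpoints return a JSON array
# and that plain `requests` is acceptable (hypothetical implementation, not the
# original one):
import requests

def retrieve_from(endpoint):
    # Fetch the endpoint and decode the JSON payload (a list of dicts for FMP).
    response = requests.get(endpoint)
    response.raise_for_status()
    return response.json()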
[ "by Django 3.1.1 on 2020-11-16 06:44 from django.db import migrations, models class Migration(migrations.Migration):", "'0001_initial'), ] operations = [ migrations.AlterField( model_name='product_data', name='product_name', field=models.CharField(blank=True, max_length=2000, null=True), ), ]", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('main_store', '0001_initial'), ] operations =", "on 2020-11-16 06:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main_store', '0001_initial'), ] operations", "3.1.1 on 2020-11-16 06:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "06:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main_store', '0001_initial'),", "= [ ('main_store', '0001_initial'), ] operations = [ migrations.AlterField( model_name='product_data', name='product_name', field=models.CharField(blank=True, max_length=2000,", "dependencies = [ ('main_store', '0001_initial'), ] operations = [ migrations.AlterField( model_name='product_data', name='product_name', field=models.CharField(blank=True,", "Django 3.1.1 on 2020-11-16 06:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "class Migration(migrations.Migration): dependencies = [ ('main_store', '0001_initial'), ] operations = [ migrations.AlterField( model_name='product_data',", "<filename>main_store/migrations/0002_auto_20201116_1214.py<gh_stars>0 # Generated by Django 3.1.1 on 2020-11-16 06:44 from django.db import migrations,", "migrations, models class Migration(migrations.Migration): dependencies = [ ('main_store', '0001_initial'), ] operations = [", "[ ('main_store', '0001_initial'), ] operations = [ migrations.AlterField( model_name='product_data', name='product_name', field=models.CharField(blank=True, max_length=2000, null=True),", "2020-11-16 06:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main_store',", "Generated by Django 3.1.1 on 2020-11-16 06:44 from django.db import migrations, models class", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main_store', '0001_initial'), ]", "models class Migration(migrations.Migration): dependencies = [ ('main_store', '0001_initial'), ] operations = [ migrations.AlterField(", "('main_store', '0001_initial'), ] operations = [ migrations.AlterField( model_name='product_data', name='product_name', field=models.CharField(blank=True, max_length=2000, null=True), ),", "Migration(migrations.Migration): dependencies = [ ('main_store', '0001_initial'), ] operations = [ migrations.AlterField( model_name='product_data', name='product_name',", "# Generated by Django 3.1.1 on 2020-11-16 06:44 from django.db import migrations, models" ]
[ "in enumerate(row.yvars): name = row.volume_id #+ '_' + yvar.lower() print(\"# precision: tgt %.2e", "max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % # ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i]", "Y) #print xvars #(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) #print(\"linear R^2[sample] %.8f\"", "= term(df, var) return df def choose(vars, df1, df2): X1 = df1.loc[:, vars].as_matrix()", "row.score)) print(\"#\" + row.volume_id) r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id) print(\"%s %s %s 20\"", "for x in arr ] # print(yvars[i] + \" = { \" +", "= load_samples(sample_file, genvars=xvars_full) trace(\"reading test samples...\" + test_file) test = load_samples(test_file, genvars=xvars_full) trace(\"linear", "matplotlib.pyplot as plt import numpy as np import pandas as pd import sklearn.linear_model", "2 if 'tofext' in id: r_bin = 3 if 'cal' in id: r_bin", "print(yvars[i]) # print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\" % lr.intercept_[i]) #sample_dev =", "= deviation_stat(lr.predict(X), Y, prec=precision) #test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest), Ytest,", "z_bin, quadrant)) for i, yvar in enumerate(row.yvars): name = row.volume_id #+ '_' +", "\" = { \" + ', '.join(arr) + \" }\") # print(\"deviation stat", "'By', 'Bz'] #yvars = ['Bz'] (Y, Ytest) = choose(yvars, df, test) #(Y, Ytest)", "'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr'] #xvars=['x', 'xzz', 'xyz', 'yz',", "python debug = True # enable trace def trace(x): global debug if debug:", "\"test_file\": [test_file], \"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)] }) def volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat', '')\\", "prec * 100) return (max_dev, max_pct, avg_dev, avg_pct) # IO Df def load_samples(path,", "df.Bx) - np.arctan2(df.y, df.x) df['Br'] = df.Bt * np.cos(df.Bpsi) df['Bp'] = df.Bt *", "= combinatrial_vars('xyz', 3) # use all terms upto 3rd power (X, Xtest) =", "l)]) return term_list # product :: a#* => [a] -> a def product(xs):", "list(vars_str))) # (f(X), Y) -> (max deviation, max%, avg dev, avg%) def deviation_stat(fX,", "= sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\", \"yy\", \"zz\", 'x',", "\" + str(datetime.datetime.today())) print(\"# \" + ', '.join(result.iloc[0].yvars) + \" = 1 +", "quadrant = get_location_by_volume_id(row.volume_id) print(\"%s %s %s 20\" % (r_bin, z_bin, quadrant)) for i,", "#lr.fit(X, Y) #ri.fit(X, Y) #la.fit(X, Y) #fs.fit(X, Y) #print xvars #(sample_score, test_score) =", "sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\", \"yy\",", "['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...] 
def combinatrial_vars(vars_str='xyz', length=3): term_list =", "< 1.5pi < q4 < 2pi\") print(\"# header: Rbin Zbin Quadrant Nval_per_compoment(=20)\") print(\"#", "in result.iterrows(): #print(\"// ** %s - R^2 %s\" % (row.volume_id, row.score)) print(\"#\" +", "if 'its' in id: r_bin = 0 if 'tpc' in id: r_bin =", "< tpc < 250 < tof < 400 < tofext < 423 <", "vars].as_matrix() return (X1, X2) # IO () def run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set", "'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz'] yvars = ['Bx', 'By',", "test['Bz']) xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power (X,", "'z', 'Bx', 'By', 'Bz'] df = pd.read_csv(path, sep=' ', names=sample_cols) if cylindrical_axis: df['r']", "-> XX def term(dataframe, vars_str): return product(map(lambda x: dataframe[x], list(vars_str))) # (f(X), Y)", "= df.Bt * np.cos(df.Bpsi) df['Bp'] = df.Bt * np.sin(df.Bpsi) if absolute_axis: df['X'] =", "'y', 'z', 'xx', 'xy', 'xz', 'yy', ...] def combinatrial_vars(vars_str='xyz', length=3): term_list = []", "(max_dev / prec * 100, avg_dev / prec * 100) return (max_dev, max_pct,", "trace(\"reading test samples...\" + test_file) test = load_samples(test_file, genvars=xvars_full) trace(\"linear regression fit...\") lr", "Y) (max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct) = (max_dev / prec *", "+ df.y**2) df['p'] = np.arctan2(df.y, df.x) df['Bt'] = np.sqrt(df.Bx**2 + df.By**2) df['Bpsi'] =", "y, z upto 3 dims trace(\"reading training samples... \" + sample_file) df =", "= choose(xvars, df, test) for y in yvars: fs.fit(X, df[y]) res = pd.DataFrame({", "df['Y'] = np.abs(df.y) df['Z'] = np.abs(df.z) for var in genvars: df[var] = term(df,", "range(len(yvars)): # arr = [lr.intercept_[i]] + lr.coef_[i] # arr = [ str(x) for", "r_bin = 2 if 'tofext' in id: r_bin = 3 if 'cal' in", "determination in multiple linear regression. [0,1]\") print(\"\") for index, row in result.iterrows(): #print(\"//", "header: Rbin Zbin Quadrant Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment x floats\") #print(\"# R^2: coefficient", "R^2[test] %.8f\" % test_score2) #print(la.coef_) #for i in range(len(yvars)): # print(yvars[i]) # print(pd.DataFrame({\"Name\":", "= deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % sample_dev)", "raise AssertionError('Unknown field strengh: %s' % path) # ['x', 'y', 'z', 'xx', 'xy',", "\"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)] }) def volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat', '')\\ .replace('-', '_')", "def volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat', '')\\ .replace('-', '_') def get_location_by_volume_id(id): if 'its' in", "return r_bin, z_bin, quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This file was generated", "for x in coef ] body = ' '.join(arr) #decl = \"const double[]", "= { \" + ', '.join(arr) + \" }\") # print(\"deviation stat [test]:", "Ytest)) #print(\"linear R^2[sample] %.8f\" % sample_score) #print(\"linear R^2[test] %.8f\" % test_score) #(sample_score2, test_score2)", "quadrant)) for i, yvar in enumerate(row.yvars): name = row.volume_id #+ '_' + yvar.lower()", "'yy', ...] 
def combinatrial_vars(vars_str='xyz', length=3): term_list = [] for l in range(length): term_list.extend([''.join(v)", "< q1 < 0.5pi < q2 < pi < q3 < 1.5pi <", "'.join(yvars) + \" = 1 + \" + ' + '.join(xvars)) test_dev =", "# arr = [lr.intercept_[i]] + lr.coef_[i] # arr = [ str(x) for x", "\"tofext2k_z0_q4\" -> 0 if 'q1' in id: quadrant = 0 if 'q2' in", "if 'q1' in id: quadrant = 0 if 'q2' in id: quadrant =", "+ \" = 1 + \" + ' + '.join(result.iloc[0].xvars)) print(\"# barrel r:", "[] for l in range(length): term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])", "Quadrant Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment x floats\") #print(\"# R^2: coefficient of determination in", "% sample_score2) #print(\"lasso R^2[test] %.8f\" % test_score2) #print(la.coef_) #for i in range(len(yvars)): #", "% lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X), Y, prec=precision) #test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2", "423 < cal < 500\") print(\"# barrel z: -550 < z < 550\")", "product :: a#* => [a] -> a def product(xs): return reduce(operator.mul, xs, 1)", "0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi", "\"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\": [sample_score], \"score\": [test_score], \"coeffs\":", "in id: quadrant = 1 if 'q3' in id: quadrant = 2 if", "#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) #print(\"linear R^2[sample] %.8f\" % sample_score) #print(\"linear", "i+1): #(X, Xtest) = choose(xvars, df, test) #lr.fit(X, Y) #ri.fit(X, Y) #la.fit(X, Y)", "enable trace def trace(x): global debug if debug: print(x) trace(\"loading...\") from itertools import", "choose(yvars, df, test) #(Y, Ytest) = (df['Bz'], test['Bz']) xvars = combinatrial_vars('xyz', 3) #", "test_score2) #print(la.coef_) #for i in range(len(yvars)): # print(yvars[i]) # print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params'))", "# variables except x, y, z upto 3 dims trace(\"reading training samples... 
\"", "in id: quadrant = 3 return r_bin, z_bin, quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\")", "Ytest) = (df['Bz'], test['Bz']) xvars = combinatrial_vars('xyz', 3) # use all terms upto", "in id: quadrant = 2 if 'q4' in id: quadrant = 3 return", "id: quadrant = 3 return r_bin, z_bin, quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"#", "4 z_bin = int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" -> 0 if 'q1' in id: quadrant", "term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)]) return term_list # product ::", "550\") print(\"# phi: 0 < q1 < 0.5pi < q2 < pi <", "if 'tof' in id: r_bin = 2 if 'tofext' in id: r_bin =", "print(\"# \" + ', '.join(result.iloc[0].yvars) + \" = 1 + \" + '", "load_samples(test_file, genvars=xvars_full) trace(\"linear regression fit...\") lr = sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV() #la =", "x for x in coef ] body = ' '.join(arr) #decl = \"const", "df = pd.read_csv(path, sep=' ', names=sample_cols) if cylindrical_axis: df['r'] = np.sqrt(df.x**2 + df.y**2)", "}\") # print(\"deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % #", "def trace(x): global debug if debug: print(x) trace(\"loading...\") from itertools import combinations, combinations_with_replacement", "foldl in Haskell # (XYZ, \"xx\") -> XX def term(dataframe, vars_str): return product(map(lambda", "avg_pct) # IO Df def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols = ['x', 'y',", "genvars=[]): sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz'] df = pd.read_csv(path, sep='", "= run_analysis(sample_file, test_set[i]) result = result.append(df, ignore_index=True) write_header(result) def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file", "trace(', '.join(yvars) + \" = 1 + \" + ' + '.join(xvars)) test_dev", "write_header(result) def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df, test, lr,", "trace(\"linear regression R^2 [train data]: %.8f\" % sample_score) trace(\"linear regression R^2 [test data]", "[test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\": [sample_score], \"score\": [test_score], \"coeffs\": [lr.coef_],", "tofext < 423 < cal < 500\") print(\"# barrel z: -550 < z", "\"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\": [sample_score], \"score\": [test_score], \"coeffs\": [lr.coef_], \"intercept\":", "', names=sample_cols) if cylindrical_axis: df['r'] = np.sqrt(df.x**2 + df.y**2) df['p'] = np.arctan2(df.y, df.x)", "test_score) #(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest)) #print(\"lasso R^2[sample] %.8f\" % sample_score2)", "% (row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef = [row['intercept'][i]] + list(row['coeffs'][i]) arr =", "row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef = [row['intercept'][i]] + list(row['coeffs'][i]) arr = [ \"%.5e\"", "xvars #(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) #print(\"linear R^2[sample] %.8f\" % sample_score)", "#ri = sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars =", "id: r_bin = 4 z_bin = 
int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" -> 0 if 'q1'", "def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This file was generated from sysid.py at \"", "'xy', 'xz', 'yy', ...] def combinatrial_vars(vars_str='xyz', length=3): term_list = [] for l in", "np.abs(fX - Y) (max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct) = (max_dev /", "elif '5k' in path: return 0.005 else: raise AssertionError('Unknown field strengh: %s' %", "%.8f\" % sample_score2) #print(\"lasso R^2[test] %.8f\" % test_score2) #print(la.coef_) #for i in range(len(yvars)):", "+ \" = 1 + \" + ' + '.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest),", "and len(sample_set) > 0) result = pd.DataFrame() for i, sample_file in enumerate(sample_set): trace(\"run_analysis('%s',", "lr.coef_[i] # arr = [ str(x) for x in arr ] # print(yvars[i]", "sample_score) trace(\"linear regression R^2 [test data] : %.8f\" % test_score) return pd.DataFrame( {", "import sklearn.feature_selection import datetime def prec_from_pathname(path): if '2k' in path: return 0.002 elif", "np.arctan2(df.y, df.x) df['Bt'] = np.sqrt(df.Bx**2 + df.By**2) df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y,", "(max deviation, max%, avg dev, avg%) def deviation_stat(fX, Y, prec=0.005): dev = np.abs(fX", "r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id) print(\"%s %s %s 20\" % (r_bin, z_bin, quadrant))", "term(df, var) return df def choose(vars, df1, df2): X1 = df1.loc[:, vars].as_matrix() X2", "# use all terms upto 3rd power (X, Xtest) = choose(xvars, df, test)", "samples...\" + test_file) test = load_samples(test_file, genvars=xvars_full) trace(\"linear regression fit...\") lr = sklearn.linear_model.LinearRegression()", "if 'q3' in id: quadrant = 2 if 'q4' in id: quadrant =", "x in coef ] body = ' '.join(arr) #decl = \"const double[] %s", "= row.volume_id #+ '_' + yvar.lower() print(\"# precision: tgt %.2e max %.2e (%.1f%%)", "result = pd.DataFrame() for i, sample_file in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" % (sample_file, test_set[i]))", "Ytest)) #print(\"lasso R^2[sample] %.8f\" % sample_score2) #print(\"lasso R^2[test] %.8f\" % test_score2) #print(la.coef_) #for", "#print(la.coef_) #for i in range(len(yvars)): # print(yvars[i]) # print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) #", "xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result precision = prec_from_pathname(sample_file) assert(precision ==", "dev, avg%) def deviation_stat(fX, Y, prec=0.005): dev = np.abs(fX - Y) (max_dev, avg_dev)", "was generated from sysid.py at \" + str(datetime.datetime.today())) print(\"# \" + ', '.join(result.iloc[0].yvars)", "def prec_from_pathname(path): if '2k' in path: return 0.002 elif '5k' in path: return", "0.002 elif '5k' in path: return 0.005 else: raise AssertionError('Unknown field strengh: %s'", "in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" % (sample_file, test_set[i])) df = run_analysis(sample_file, test_set[i]) result =", "= np.abs(df.z) for var in genvars: df[var] = term(df, var) return df def", "(row.volume_id, row.score)) print(\"#\" + row.volume_id) r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id) print(\"%s %s %s", "load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz'] df", "combinations_with_replacement(list(vars_str), 1 + l)]) return term_list # product :: a#* => [a] ->", 
"(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)", "test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result precision =", "genvars=xvars_full) trace(\"reading test samples...\" + test_file) test = load_samples(test_file, genvars=xvars_full) trace(\"linear regression fit...\")", "quadrant = 2 if 'q4' in id: quadrant = 3 return r_bin, z_bin,", "+ str(datetime.datetime.today())) print(\"# \" + ', '.join(result.iloc[0].yvars) + \" = 1 + \"", "'.join(result.iloc[0].xvars)) print(\"# barrel r: 0 < its < 80 < tpc < 250", "%.8f\" % test_score) #(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest)) #print(\"lasso R^2[sample] %.8f\"", "+ test_file) test = load_samples(test_file, genvars=xvars_full) trace(\"linear regression fit...\") lr = sklearn.linear_model.LinearRegression() #ri", "import basename import matplotlib.pyplot as plt import numpy as np import pandas as", "};\\n\" % (name, body) #print(decl) print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for i in range(10):", "terms upto 3rd power (X, Xtest) = choose(xvars, df, test) for y in", "pandas as pd import sklearn.linear_model import sklearn.feature_selection import datetime def prec_from_pathname(path): if '2k'", "#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample] max %.2e", "(%.1f%%)\" % test_dev ) #print(\"lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" %", "Haskell # (XYZ, \"xx\") -> XX def term(dataframe, vars_str): return product(map(lambda x: dataframe[x],", "df['X'] = np.abs(df.x) df['Y'] = np.abs(df.y) df['Z'] = np.abs(df.z) for var in genvars:", "str(x) for x in arr ] # print(yvars[i] + \" = { \"", "deviation, max%, avg dev, avg%) def deviation_stat(fX, Y, prec=0.005): dev = np.abs(fX -", "%s\" % (row.volume_id, row.score)) print(\"#\" + row.volume_id) r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id) print(\"%s", "\"zz\", 'x', 'y', 'z', 'xzz', 'yzz'] #xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr',", "}) trace(y) trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(', '.join(yvars) + \" =", "\"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(', '.join(yvars) + \" = 1 + \" +", "in genvars: df[var] = term(df, var) return df def choose(vars, df1, df2): X1", "df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result precision", "ana_result precision = prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file)) xvars_full = combinatrial_vars('xyz', 3)[3:] # variables", "if 'tofext' in id: r_bin = 3 if 'cal' in id: r_bin =", "#print(\"lasso R^2[test] %.8f\" % test_score2) #print(la.coef_) #for i in range(len(yvars)): # print(yvars[i]) #", "+ ' + '.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for i in range(len(yvars)):", "** %s - R^2 %s\" % (row.volume_id, row.score)) print(\"#\" + row.volume_id) r_bin, z_bin,", "= np.abs(df.y) df['Z'] = np.abs(df.z) for var in genvars: df[var] = term(df, var)", "test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set) == len(test_set) and len(sample_set) > 0) result", "= 3 return r_bin, z_bin, quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This file", "% sample_score) #print(\"linear R^2[test] %.8f\" % test_score) 
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest,", "assert(precision == prec_from_pathname(test_file)) xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z", "in id: r_bin = 0 if 'tpc' in id: r_bin = 1 if", "['x', 'y', 'z', 'Bx', 'By', 'Bz'] df = pd.read_csv(path, sep=' ', names=sample_cols) if", "% test_score2) #print(la.coef_) #for i in range(len(yvars)): # print(yvars[i]) # print(pd.DataFrame({\"Name\": xvars, \"Params\":", "test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) #print(\"linear R^2[sample] %.8f\" % sample_score) #print(\"linear R^2[test]", "def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']", "v in combinations_with_replacement(list(vars_str), 1 + l)]) return term_list # product :: a#* =>", "Rbin Zbin Quadrant Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment x floats\") #print(\"# R^2: coefficient of", "r_bin = 3 if 'cal' in id: r_bin = 4 z_bin = int(id.split('_')[1][1:])", "yvars: fs.fit(X, df[y]) res = pd.DataFrame({ \"term\": xvars, \"rank\": fs.ranking_ }) trace(y) trace(res.sort_values(by", "'q4' in id: quadrant = 3 return r_bin, z_bin, quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\")", "def deviation_stat(fX, Y, prec=0.005): dev = np.abs(fX - Y) (max_dev, avg_dev) = (dev.max(axis=0),", "Y) #fs.fit(X, Y) #print xvars #(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) #print(\"linear", "debug if debug: print(x) trace(\"loading...\") from itertools import combinations, combinations_with_replacement from glob import", "#(X, Xtest) = choose(xvars, df, test) #lr.fit(X, Y) #ri.fit(X, Y) #la.fit(X, Y) #fs.fit(X,", "= (la.score(X, Y), la.score(Xtest, Ytest)) #print(\"lasso R^2[sample] %.8f\" % sample_score2) #print(\"lasso R^2[test] %.8f\"", "#(Y, Ytest) = (df['Bz'], test['Bz']) xvars = combinatrial_vars('xyz', 3) # use all terms", "[test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\": [sample_score], \"score\": [test_score], \"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\": [sample_file],", "row['avg%'][i])) coef = [row['intercept'][i]] + list(row['coeffs'][i]) arr = [ \"%.5e\" % x for", "in path: return 0.005 else: raise AssertionError('Unknown field strengh: %s' % path) #", "return term_list # product :: a#* => [a] -> a def product(xs): return", "Y, Xtest, Ytest, ana_result precision = prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file)) xvars_full = combinatrial_vars('xyz',", "'yzz'] #xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']", "pd.DataFrame({ \"term\": xvars, \"rank\": fs.ranking_ }) trace(y) trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y)", "in id: r_bin = 1 if 'tof' in id: r_bin = 2 if", "deviation_stat(lr.predict(X), Y, prec=precision) #test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)", "(%.1f%%)\" % # ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] )) (sample_score, test_score) = (lr.score(X,", "path: return 0.005 else: raise AssertionError('Unknown field strengh: %s' % path) # ['x',", "print(\"deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % # ( test_dev[0][i],", "-> 0 if 'q1' in id: quadrant = 0 if 'q2' in id:", "%.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % (row['precision'], row['max_dev'][i], row['max%'][i], 
row['avg_dev'][i], row['avg%'][i]))", "print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for i in range(10): # for xvars in combinations(xvars_full,", "prec=precision) #test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample] max", "regression fit...\") lr = sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV() fs =", "np import pandas as pd import sklearn.linear_model import sklearn.feature_selection import datetime def prec_from_pathname(path):", "var) return df def choose(vars, df1, df2): X1 = df1.loc[:, vars].as_matrix() X2 =", "# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...] def combinatrial_vars(vars_str='xyz', length=3): term_list", "row.volume_id #+ '_' + yvar.lower() print(\"# precision: tgt %.2e max %.2e (%.1f%%) avg", "= (max_dev / prec * 100, avg_dev / prec * 100) return (max_dev,", "arr = [lr.intercept_[i]] + lr.coef_[i] # arr = [ str(x) for x in", "deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample] max %.2e (%.1f%%) avg", "def term(dataframe, vars_str): return product(map(lambda x: dataframe[x], list(vars_str))) # (f(X), Y) -> (max", "sample_file) df = load_samples(sample_file, genvars=xvars_full) trace(\"reading test samples...\" + test_file) test = load_samples(test_file,", "= 0 if 'tpc' in id: r_bin = 1 if 'tof' in id:", "Y, prec=0.005): dev = np.abs(fX - Y) (max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0)) (max_pct,", "#la.fit(X, Y) #fs.fit(X, Y) #print xvars #(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))", "q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4", "+ ', '.join(arr) + \" }\") # print(\"deviation stat [test]: max %.2e (%.1f%%)", "= ['x', 'y', 'z', 'Bx', 'By', 'Bz'] df = pd.read_csv(path, sep=' ', names=sample_cols)", "Y) #la.fit(X, Y) #fs.fit(X, Y) #print xvars #(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest,", "'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest,", "volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat', '')\\ .replace('-', '_') def get_location_by_volume_id(id): if 'its' in id:", "< 2pi\") print(\"# header: Rbin Zbin Quadrant Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment x floats\")", "'Bz'] #yvars = ['Bz'] (Y, Ytest) = choose(yvars, df, test) #(Y, Ytest) =", "Y) #ri.fit(X, Y) #la.fit(X, Y) #fs.fit(X, Y) #print xvars #(sample_score, test_score) = (lr.score(X,", "assert(len(sample_set) == len(test_set) and len(sample_set) > 0) result = pd.DataFrame() for i, sample_file", "test_dev[2][i], test_dev[3][i] )) (sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) trace(\"linear regression R^2", "import glob from math import * import operator from os.path import basename import", "-> a def product(xs): return reduce(operator.mul, xs, 1) # foldl in Haskell #", "arr ] # print(yvars[i] + \" = { \" + ', '.join(arr) +", "lr.score(Xtest, Ytest)) #print(\"linear R^2[sample] %.8f\" % sample_score) #print(\"linear R^2[test] %.8f\" % test_score) #(sample_score2,", "i in range(10): # for xvars in combinations(xvars_full, i+1): #(X, Xtest) = choose(xvars,", "for xvars in combinations(xvars_full, i+1): #(X, Xtest) = choose(xvars, df, test) #lr.fit(X, Y)", "sysid.py at \" + str(datetime.datetime.today())) print(\"# \" + ', 
'.join(result.iloc[0].yvars) + \" =", "import pandas as pd import sklearn.linear_model import sklearn.feature_selection import datetime def prec_from_pathname(path): if", "trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(', '.join(yvars) + \" = 1 +", "precision = prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file)) xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except", "test_dev ) #print(\"lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % test_dev2 )", "#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz'] yvars", "(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) trace(\"linear regression R^2 [train data]: %.8f\"", "# product :: a#* => [a] -> a def product(xs): return reduce(operator.mul, xs,", "id: quadrant = 2 if 'q4' in id: quadrant = 3 return r_bin,", "'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz'] yvars =", "= sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\",", "(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef = [row['intercept'][i]] + list(row['coeffs'][i]) arr = [", "if 'tpc' in id: r_bin = 1 if 'tof' in id: r_bin =", "#for i in range(10): # for xvars in combinations(xvars_full, i+1): #(X, Xtest) =", "xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\" % lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)", "[test_score], \"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\": [sample_file], \"test_file\": [test_file], \"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)]", "(%.1f%%) avg %.2e (%.1f%%)\" % sample_dev) #print(\"[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\"", "sklearn.linear_model import sklearn.feature_selection import datetime def prec_from_pathname(path): if '2k' in path: return 0.002", "df['Bt'] = np.sqrt(df.Bx**2 + df.By**2) df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x) df['Br']", "\" + ', '.join(arr) + \" }\") # print(\"deviation stat [test]: max %.2e", "max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % test_dev ) #print(\"lasso [test] max %.2e", "xvars in combinations(xvars_full, i+1): #(X, Xtest) = choose(xvars, df, test) #lr.fit(X, Y) #ri.fit(X,", "== prec_from_pathname(test_file)) xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto", "IO Df def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols = ['x', 'y', 'z', 'Bx',", "% path) # ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...] 
def combinatrial_vars(vars_str='xyz',", "(XYZ, \"xx\") -> XX def term(dataframe, vars_str): return product(map(lambda x: dataframe[x], list(vars_str))) #", "() def run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set) ==", "xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims", "quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This file was generated from sysid.py at", "= { %s };\\n\" % (name, body) #print(decl) print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for", "(lr.score(X, Y), lr.score(Xtest, Ytest)) #print(\"linear R^2[sample] %.8f\" % sample_score) #print(\"linear R^2[test] %.8f\" %", "#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\", \"yy\", \"zz\", 'x', 'y', 'z', 'xzz', 'yzz']", "in range(length): term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)]) return term_list #", "r_bin, z_bin, quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This file was generated from", "\"const double[] %s = { %s };\\n\" % (name, body) #print(decl) print(body) print(\"\")", "= 4 z_bin = int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" -> 0 if 'q1' in id:", "\" = 1 + \" + ' + '.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest), Ytest,", "X1 = df1.loc[:, vars].as_matrix() X2 = df2.loc[:, vars].as_matrix() return (X1, X2) # IO", "1) # foldl in Haskell # (XYZ, \"xx\") -> XX def term(dataframe, vars_str):", "= ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr'] #xvars=['x', 'xzz',", ":: a#* => [a] -> a def product(xs): return reduce(operator.mul, xs, 1) #", "= np.abs(fX - Y) (max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct) = (max_dev", "(la.score(X, Y), la.score(Xtest, Ytest)) #print(\"lasso R^2[sample] %.8f\" % sample_score2) #print(\"lasso R^2[test] %.8f\" %", "%s };\\n\" % (name, body) #print(decl) print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for i in", "/ prec * 100, avg_dev / prec * 100) return (max_dev, max_pct, avg_dev,", "xvars, yvars, X, Y, Xtest, Ytest, ana_result precision = prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file))", "(dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec", "'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz'] yvars = ['Bx', 'By', 'Bz'] #yvars", "print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\" % lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X), Y,", "itertools import combinations, combinations_with_replacement from glob import glob from math import * import", "# foldl in Haskell # (XYZ, \"xx\") -> XX def term(dataframe, vars_str): return", "term_list # product :: a#* => [a] -> a def product(xs): return reduce(operator.mul,", "a def product(xs): return reduce(operator.mul, xs, 1) # foldl in Haskell # (XYZ,", "\"xx\") -> XX def term(dataframe, vars_str): return product(map(lambda x: dataframe[x], list(vars_str))) # (f(X),", "- Y) (max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct) = (max_dev / prec", "def get_location_by_volume_id(id): if 'its' in id: r_bin = 0 if 'tpc' in id:", "dev.mean(axis=0)) (max_pct, avg_pct) 
= (max_dev / prec * 100, avg_dev / prec *", "3 return r_bin, z_bin, quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This file was", "= np.sqrt(df.x**2 + df.y**2) df['p'] = np.arctan2(df.y, df.x) df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)", "row.volume_id) r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id) print(\"%s %s %s 20\" % (r_bin, z_bin,", "3)[3:] # variables except x, y, z upto 3 dims trace(\"reading training samples...", "cylindrical_axis: df['r'] = np.sqrt(df.x**2 + df.y**2) df['p'] = np.arctan2(df.y, df.x) df['Bt'] = np.sqrt(df.Bx**2", "sklearn.feature_selection import datetime def prec_from_pathname(path): if '2k' in path: return 0.002 elif '5k'", "in id: r_bin = 4 z_bin = int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" -> 0 if", "< its < 80 < tpc < 250 < tof < 400 <", "' + '.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for i in range(len(yvars)): #", "return product(map(lambda x: dataframe[x], list(vars_str))) # (f(X), Y) -> (max deviation, max%, avg", "if 'cal' in id: r_bin = 4 z_bin = int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" ->", "df[y]) res = pd.DataFrame({ \"term\": xvars, \"rank\": fs.ranking_ }) trace(y) trace(res.sort_values(by = \"rank\"))", "test) #lr.fit(X, Y) #ri.fit(X, Y) #la.fit(X, Y) #fs.fit(X, Y) #print xvars #(sample_score, test_score)", "test_score2) = (la.score(X, Y), la.score(Xtest, Ytest)) #print(\"lasso R^2[sample] %.8f\" % sample_score2) #print(\"lasso R^2[test]", "#print(\"lasso R^2[sample] %.8f\" % sample_score2) #print(\"lasso R^2[test] %.8f\" % test_score2) #print(la.coef_) #for i", "= [] for l in range(length): term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 +", "R^2[sample] %.8f\" % sample_score) #print(\"linear R^2[test] %.8f\" % test_score) #(sample_score2, test_score2) = (la.score(X,", "multiple linear regression. [0,1]\") print(\"\") for index, row in result.iterrows(): #print(\"// ** %s", "sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\", \"yy\", \"zz\", 'x', 'y',", "its < 80 < tpc < 250 < tof < 400 < tofext", "% x for x in coef ] body = ' '.join(arr) #decl =", "combinatrial_vars('xyz', 3) # use all terms upto 3rd power (X, Xtest) = choose(xvars,", "1 if 'q3' in id: quadrant = 2 if 'q4' in id: quadrant", "100) return (max_dev, max_pct, avg_dev, avg_pct) # IO Df def load_samples(path, cylindrical_axis=True, absolute_axis=True,", "test_dev[1][i], test_dev[2][i], test_dev[3][i] )) (sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) trace(\"linear regression", "#print(\"# R^2: coefficient of determination in multiple linear regression. [0,1]\") print(\"\") for index,", "avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100) return", "'cal' in id: r_bin = 4 z_bin = int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" -> 0", "'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz'] yvars = ['Bx',", "pd.DataFrame( { \"xvars\": [xvars], \"yvars\": [yvars], \"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\":", "%.2e (%.1f%%) avg %.2e (%.1f%%)\" % sample_dev) #print(\"[test] max %.2e (%.1f%%) avg %.2e", "dims trace(\"reading training samples... 
\" + sample_file) df = load_samples(sample_file, genvars=xvars_full) trace(\"reading test", "import operator from os.path import basename import matplotlib.pyplot as plt import numpy as", "for v in combinations_with_replacement(list(vars_str), 1 + l)]) return term_list # product :: a#*", "for var in genvars: df[var] = term(df, var) return df def choose(vars, df1,", "%.2e (%.1f%%) avg %.2e (%.1f%%)\" % test_dev ) #print(\"lasso [test] max %.2e (%.1f%%)", "1 + l)]) return term_list # product :: a#* => [a] -> a", "avg_dev) = (dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct) = (max_dev / prec * 100, avg_dev", "sep=' ', names=sample_cols) if cylindrical_axis: df['r'] = np.sqrt(df.x**2 + df.y**2) df['p'] = np.arctan2(df.y,", "Y), la.score(Xtest, Ytest)) #print(\"lasso R^2[sample] %.8f\" % sample_score2) #print(\"lasso R^2[test] %.8f\" % test_score2)", "- R^2 %s\" % (row.volume_id, row.score)) print(\"#\" + row.volume_id) r_bin, z_bin, quadrant =", "'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr'] #xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy',", "absolute_axis=True, genvars=[]): sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz'] df = pd.read_csv(path,", "[ str(x) for x in arr ] # print(yvars[i] + \" = {", "\"score\": [test_score], \"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\": [sample_file], \"test_file\": [test_file], \"precision\": [precision], \"volume_id\":", "phi: 0 < q1 < 0.5pi < q2 < pi < q3 <", "from math import * import operator from os.path import basename import matplotlib.pyplot as", "# for xvars in combinations(xvars_full, i+1): #(X, Xtest) = choose(xvars, df, test) #lr.fit(X,", "(%.1f%%) avg %.2e (%.1f%%)\" % # ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] )) (sample_score,", "def product(xs): return reduce(operator.mul, xs, 1) # foldl in Haskell # (XYZ, \"xx\")", "Ytest, prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample] max %.2e (%.1f%%) avg %.2e", "[lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\": [sample_file], \"test_file\": [test_file], \"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)] }) def", "1 + \" + ' + '.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for", "sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set) == len(test_set) and len(sample_set)", "yvars, X, Y, Xtest, Ytest, ana_result precision = prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file)) xvars_full", "= [\"xx\", \"yy\", \"zz\", 'x', 'y', 'z', 'xzz', 'yzz'] #xvars = ['xxxr', 'xrrX',", "+ \" + ' + '.join(result.iloc[0].xvars)) print(\"# barrel r: 0 < its <", "i, yvar in enumerate(row.yvars): name = row.volume_id #+ '_' + yvar.lower() print(\"# precision:", "if absolute_axis: df['X'] = np.abs(df.x) df['Y'] = np.abs(df.y) df['Z'] = np.abs(df.z) for var", "name = row.volume_id #+ '_' + yvar.lower() print(\"# precision: tgt %.2e max %.2e", "in combinations(xvars_full, i+1): #(X, Xtest) = choose(xvars, df, test) #lr.fit(X, Y) #ri.fit(X, Y)", "term_list = [] for l in range(length): term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1", "+ \" }\") # print(\"deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)\"", "fs.fit(X, df[y]) res = pd.DataFrame({ \"term\": xvars, \"rank\": fs.ranking_ }) trace(y) trace(res.sort_values(by =", "= 1 if 'q3' in id: quadrant = 2 if 'q4' in id:", "df['Bp'] 
= df.Bt * np.sin(df.Bpsi) if absolute_axis: df['X'] = np.abs(df.x) df['Y'] = np.abs(df.y)", "(df['Bz'], test['Bz']) xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power", "df.x) df['Bt'] = np.sqrt(df.Bx**2 + df.By**2) df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)", "body = ' '.join(arr) #decl = \"const double[] %s = { %s };\\n\"", "#print(\"[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % test_dev ) #print(\"lasso [test] max", "3 dims trace(\"reading training samples... \" + sample_file) df = load_samples(sample_file, genvars=xvars_full) trace(\"reading", "\"sample_score\": [sample_score], \"score\": [test_score], \"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\": [sample_file], \"test_file\": [test_file], \"precision\":", "data] : %.8f\" % test_score) return pd.DataFrame( { \"xvars\": [xvars], \"yvars\": [yvars], \"max_dev\":", "#+ '_' + yvar.lower() print(\"# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e", "= 'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y,", "df['r'] = np.sqrt(df.x**2 + df.y**2) df['p'] = np.arctan2(df.y, df.x) df['Bt'] = np.sqrt(df.Bx**2 +", "(Y, Ytest) = choose(yvars, df, test) #(Y, Ytest) = (df['Bz'], test['Bz']) xvars =", "lr.fit(X, Y) trace(', '.join(yvars) + \" = 1 + \" + ' +", "0 if 'q2' in id: quadrant = 1 if 'q3' in id: quadrant", "fs = sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\", \"yy\", \"zz\",", "< z < 550\") print(\"# phi: 0 < q1 < 0.5pi < q2", "trace(x): global debug if debug: print(x) trace(\"loading...\") from itertools import combinations, combinations_with_replacement from", "+ row.volume_id) r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id) print(\"%s %s %s 20\" % (r_bin,", "field strengh: %s' % path) # ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy',", "row['avg_dev'][i], row['avg%'][i])) coef = [row['intercept'][i]] + list(row['coeffs'][i]) arr = [ \"%.5e\" % x", "os.path import basename import matplotlib.pyplot as plt import numpy as np import pandas", "= glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set) == len(test_set) and len(sample_set) > 0) result =", "in Haskell # (XYZ, \"xx\") -> XX def term(dataframe, vars_str): return product(map(lambda x:", "cal < 500\") print(\"# barrel z: -550 < z < 550\") print(\"# phi:", "df[var] = term(df, var) return df def choose(vars, df1, df2): X1 = df1.loc[:,", "# print(\"+ %e\" % lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X), Y, prec=precision) #test_dev = deviation_stat(lr.predict(Xtest),", "= df2.loc[:, vars].as_matrix() return (X1, X2) # IO () def run_analysis_for_all_fields(): sample_set =", "2pi\") print(\"# header: Rbin Zbin Quadrant Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment x floats\") #print(\"#", "% # ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] )) (sample_score, test_score) = (lr.score(X, Y),", "barrel r: 0 < its < 80 < tpc < 250 < tof", "avg%) def deviation_stat(fX, Y, prec=0.005): dev = np.abs(fX - Y) (max_dev, avg_dev) =", "\" + sample_file) df = load_samples(sample_file, genvars=xvars_full) trace(\"reading test samples...\" + test_file) test", "= sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)", "test_set) assert(len(sample_set) == len(test_set) and len(sample_set) > 0) result = pd.DataFrame() for 
i,", "in id: quadrant = 0 if 'q2' in id: quadrant = 1 if", "q2 < pi < q3 < 1.5pi < q4 < 2pi\") print(\"# header:", "= \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(', '.join(yvars) + \" = 1 + \"", "(name, body) #print(decl) print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for i in range(10): # for", "X, Y, Xtest, Ytest, ana_result precision = prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file)) xvars_full =", "np.abs(df.y) df['Z'] = np.abs(df.z) for var in genvars: df[var] = term(df, var) return", "avg_dev, avg_pct) # IO Df def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols = ['x',", "Y) trace(', '.join(yvars) + \" = 1 + \" + ' + '.join(xvars))", "plt import numpy as np import pandas as pd import sklearn.linear_model import sklearn.feature_selection", "import datetime def prec_from_pathname(path): if '2k' in path: return 0.002 elif '5k' in", "result.iterrows(): #print(\"// ** %s - R^2 %s\" % (row.volume_id, row.score)) print(\"#\" + row.volume_id)", "max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % sample_dev) #print(\"[test] max %.2e (%.1f%%) avg", "%.8f\" % test_score) return pd.DataFrame( { \"xvars\": [xvars], \"yvars\": [yvars], \"max_dev\": [test_dev[0]], \"max%\":", "r_bin = 0 if 'tpc' in id: r_bin = 1 if 'tof' in", "in arr ] # print(yvars[i] + \" = { \" + ', '.join(arr)", "2 if 'q4' in id: quadrant = 3 return r_bin, z_bin, quadrant def", "print(\"# header: Rbin Zbin Quadrant Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment x floats\") #print(\"# R^2:", "X2 = df2.loc[:, vars].as_matrix() return (X1, X2) # IO () def run_analysis_for_all_fields(): sample_set", "upto 3rd power (X, Xtest) = choose(xvars, df, test) for y in yvars:", "prec=precision) #for i in range(len(yvars)): # arr = [lr.intercept_[i]] + lr.coef_[i] # arr", "%.2e (%.1f%%)\" % sample_dev) #print(\"[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % test_dev", "if 'q2' in id: quadrant = 1 if 'q3' in id: quadrant =", "sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars", "'.join(result.iloc[0].yvars) + \" = 1 + \" + ' + '.join(result.iloc[0].xvars)) print(\"# barrel", "range(10): # for xvars in combinations(xvars_full, i+1): #(X, Xtest) = choose(xvars, df, test)", "import matplotlib.pyplot as plt import numpy as np import pandas as pd import", "( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] )) (sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))", "= [ str(x) for x in arr ] # print(yvars[i] + \" =", "< cal < 500\") print(\"# barrel z: -550 < z < 550\") print(\"#", "[sample_score], \"score\": [test_score], \"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\": [sample_file], \"test_file\": [test_file], \"precision\": [precision],", "+ '.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for i in range(len(yvars)): # arr", "'xzz', 'yzz'] #xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz',", "Y), lr.score(Xtest, Ytest)) trace(\"linear regression R^2 [train data]: %.8f\" % sample_score) trace(\"linear regression", "%.8f\" % sample_score) trace(\"linear regression R^2 [test data] : %.8f\" % test_score) return", "quadrant = 0 if 'q2' in id: quadrant = 1 if 'q3' in", "'its' in id: r_bin = 0 if 'tpc' in id: r_bin = 1", "250 < tof < 400 < 
tofext < 423 < cal < 500\")", "from glob import glob from math import * import operator from os.path import", "trace(\"loading...\") from itertools import combinations, combinations_with_replacement from glob import glob from math import", "from os.path import basename import matplotlib.pyplot as plt import numpy as np import", "product(xs): return reduce(operator.mul, xs, 1) # foldl in Haskell # (XYZ, \"xx\") ->", "i in range(len(yvars)): # arr = [lr.intercept_[i]] + lr.coef_[i] # arr = [", "row in result.iterrows(): #print(\"// ** %s - R^2 %s\" % (row.volume_id, row.score)) print(\"#\"", "test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for i in range(len(yvars)): # arr = [lr.intercept_[i]]", "test) for y in yvars: fs.fit(X, df[y]) res = pd.DataFrame({ \"term\": xvars, \"rank\":", "from itertools import combinations, combinations_with_replacement from glob import glob from math import *", "df1, df2): X1 = df1.loc[:, vars].as_matrix() X2 = df2.loc[:, vars].as_matrix() return (X1, X2)", "Ytest, prec=precision) #for i in range(len(yvars)): # arr = [lr.intercept_[i]] + lr.coef_[i] #", "] body = ' '.join(arr) #decl = \"const double[] %s = { %s", "0 if 'q1' in id: quadrant = 0 if 'q2' in id: quadrant", "Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment x floats\") #print(\"# R^2: coefficient of determination in multiple", "'By', 'Bz'] df = pd.read_csv(path, sep=' ', names=sample_cols) if cylindrical_axis: df['r'] = np.sqrt(df.x**2", "#result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This file was generated from sysid.py at \" + str(datetime.datetime.today()))", "[volume_id_from_path(sample_file)] }) def volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat', '')\\ .replace('-', '_') def get_location_by_volume_id(id): if", "' '.join(arr) #decl = \"const double[] %s = { %s };\\n\" % (name,", "= np.abs(df.x) df['Y'] = np.abs(df.y) df['Z'] = np.abs(df.z) for var in genvars: df[var]", "= [lr.intercept_[i]] + lr.coef_[i] # arr = [ str(x) for x in arr", "= True # enable trace def trace(x): global debug if debug: print(x) trace(\"loading...\")", "< 500\") print(\"# barrel z: -550 < z < 550\") print(\"# phi: 0", "'z', 'xx', 'xy', 'xz', 'yy', ...] 
def combinatrial_vars(vars_str='xyz', length=3): term_list = [] for", "z_bin, quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This file was generated from sysid.py", "df.x) df['Br'] = df.Bt * np.cos(df.Bpsi) df['Bp'] = df.Bt * np.sin(df.Bpsi) if absolute_axis:", "1, verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\", \"yy\", \"zz\", 'x', 'y', 'z',", "'xz', 'yzz'] yvars = ['Bx', 'By', 'Bz'] #yvars = ['Bz'] (Y, Ytest) =", "3rd power (X, Xtest) = choose(xvars, df, test) for y in yvars: fs.fit(X,", "trace(y) trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(', '.join(yvars) + \" = 1", "= choose(yvars, df, test) #(Y, Ytest) = (df['Bz'], test['Bz']) xvars = combinatrial_vars('xyz', 3)", "deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % sample_dev) #print(\"[test]", "ignore_index=True) write_header(result) def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df, test,", "df['p'] = np.arctan2(df.y, df.x) df['Bt'] = np.sqrt(df.Bx**2 + df.By**2) df['Bpsi'] = np.arctan2(df.By, df.Bx)", "yvars = ['Bx', 'By', 'Bz'] #yvars = ['Bz'] (Y, Ytest) = choose(yvars, df,", "all terms upto 3rd power (X, Xtest) = choose(xvars, df, test) for y", "datetime def prec_from_pathname(path): if '2k' in path: return 0.002 elif '5k' in path:", "combinations_with_replacement from glob import glob from math import * import operator from os.path", "operator from os.path import basename import matplotlib.pyplot as plt import numpy as np", "print(yvars[i] + \" = { \" + ', '.join(arr) + \" }\") #", "{ %s };\\n\" % (name, body) #print(decl) print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for i", "Nval_per_compoment x floats\") #print(\"# R^2: coefficient of determination in multiple linear regression. [0,1]\")", "np.abs(df.z) for var in genvars: df[var] = term(df, var) return df def choose(vars,", "floats\") #print(\"# R^2: coefficient of determination in multiple linear regression. 
[0,1]\") print(\"\") for", "% sample_dev) #print(\"[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % test_dev ) #print(\"lasso", "'yzz'] yvars = ['Bx', 'By', 'Bz'] #yvars = ['Bz'] (Y, Ytest) = choose(yvars,", "test_dev[3][i] )) (sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) trace(\"linear regression R^2 [train", "\"yy\", \"zz\", 'x', 'y', 'z', 'xzz', 'yzz'] #xvars = ['xxxr', 'xrrX', 'zzrX', 'p',", "< pi < q3 < 1.5pi < q4 < 2pi\") print(\"# header: Rbin", "= result.append(df, ignore_index=True) write_header(result) def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global precision,", "trace def trace(x): global debug if debug: print(x) trace(\"loading...\") from itertools import combinations,", "r_bin = 1 if 'tof' in id: r_bin = 2 if 'tofext' in", "% sample_score) trace(\"linear regression R^2 [test data] : %.8f\" % test_score) return pd.DataFrame(", "[test_file], \"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)] }) def volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat', '')\\ .replace('-',", "vars].as_matrix() X2 = df2.loc[:, vars].as_matrix() return (X1, X2) # IO () def run_analysis_for_all_fields():", "=> [a] -> a def product(xs): return reduce(operator.mul, xs, 1) # foldl in", "for y in yvars: fs.fit(X, df[y]) res = pd.DataFrame({ \"term\": xvars, \"rank\": fs.ranking_", "lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\" % lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X), Y, prec=precision) #test_dev =", "'xrrY', 'xzrX', 'xxxz', 'xzzr'] #xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z',", "print(\"+ %e\" % lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X), Y, prec=precision) #test_dev = deviation_stat(lr.predict(Xtest), Ytest,", "trace(\"run_analysis('%s', '%s')\" % (sample_file, test_set[i])) df = run_analysis(sample_file, test_set[i]) result = result.append(df, ignore_index=True)", "term(dataframe, vars_str): return product(map(lambda x: dataframe[x], list(vars_str))) # (f(X), Y) -> (max deviation,", "\" + ' + '.join(result.iloc[0].xvars)) print(\"# barrel r: 0 < its < 80", "0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi\")", "#!/usr/bin/env python debug = True # enable trace def trace(x): global debug if", "id: quadrant = 1 if 'q3' in id: quadrant = 2 if 'q4'", "% (name, body) #print(decl) print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for i in range(10): #", "'5k' in path: return 0.005 else: raise AssertionError('Unknown field strengh: %s' % path)", "(%.1f%%)\" % (row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef = [row['intercept'][i]] + list(row['coeffs'][i]) arr", "la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result precision = prec_from_pathname(sample_file) assert(precision", "choose(xvars, df, test) #lr.fit(X, Y) #ri.fit(X, Y) #la.fit(X, Y) #fs.fit(X, Y) #print xvars", "pd import sklearn.linear_model import sklearn.feature_selection import datetime def prec_from_pathname(path): if '2k' in path:", "def run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set) == len(test_set)", "i, sample_file in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" % (sample_file, test_set[i])) df = run_analysis(sample_file, test_set[i])", "sample_dev) #print(\"[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % test_dev ) 
#print(\"lasso [test]", "(r_bin, z_bin, quadrant)) for i, yvar in enumerate(row.yvars): name = row.volume_id #+ '_'", "fit...\") lr = sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr,", "if cylindrical_axis: df['r'] = np.sqrt(df.x**2 + df.y**2) df['p'] = np.arctan2(df.y, df.x) df['Bt'] =", "id: quadrant = 0 if 'q2' in id: quadrant = 1 if 'q3'", "'tofext' in id: r_bin = 3 if 'cal' in id: r_bin = 4", "print(\"#\" + row.volume_id) r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id) print(\"%s %s %s 20\" %", "genvars=xvars_full) trace(\"linear regression fit...\") lr = sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV()", "= 1 + \" + ' + '.join(result.iloc[0].xvars)) print(\"# barrel r: 0 <", "# IO Df def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols = ['x', 'y', 'z',", "trace(\"reading training samples... \" + sample_file) df = load_samples(sample_file, genvars=xvars_full) trace(\"reading test samples...\"", "file was generated from sysid.py at \" + str(datetime.datetime.today())) print(\"# \" + ',", "# print(yvars[i]) # print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\" % lr.intercept_[i]) #sample_dev", "= 0 if 'q2' in id: quadrant = 1 if 'q3' in id:", "/ prec * 100) return (max_dev, max_pct, avg_dev, avg_pct) # IO Df def", "'z', 'y', 'xz', 'yzz'] yvars = ['Bx', 'By', 'Bz'] #yvars = ['Bz'] (Y,", "la.score(Xtest, Ytest)) #print(\"lasso R^2[sample] %.8f\" % sample_score2) #print(\"lasso R^2[test] %.8f\" % test_score2) #print(la.coef_)", "product(map(lambda x: dataframe[x], list(vars_str))) # (f(X), Y) -> (max deviation, max%, avg dev,", "'xz', 'yy', ...] 
def combinatrial_vars(vars_str='xyz', length=3): term_list = [] for l in range(length):", "test samples...\" + test_file) test = load_samples(test_file, genvars=xvars_full) trace(\"linear regression fit...\") lr =", "print(\"%s %s %s 20\" % (r_bin, z_bin, quadrant)) for i, yvar in enumerate(row.yvars):", "body) #print(decl) print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for i in range(10): # for xvars", "y in yvars: fs.fit(X, df[y]) res = pd.DataFrame({ \"term\": xvars, \"rank\": fs.ranking_ })", "* import operator from os.path import basename import matplotlib.pyplot as plt import numpy", "np.arctan2(df.y, df.x) df['Br'] = df.Bt * np.cos(df.Bpsi) df['Bp'] = df.Bt * np.sin(df.Bpsi) if", "if '2k' in path: return 0.002 elif '5k' in path: return 0.005 else:", "R^2[test] %.8f\" % test_score) #(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest)) #print(\"lasso R^2[sample]", "l in range(length): term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)]) return term_list", "z < 550\") print(\"# phi: 0 < q1 < 0.5pi < q2 <", "a#* => [a] -> a def product(xs): return reduce(operator.mul, xs, 1) # foldl", "basename(path)\\ .replace('.sample.dat', '')\\ .replace('-', '_') def get_location_by_volume_id(id): if 'its' in id: r_bin =", "sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars", "['Bx', 'By', 'Bz'] #yvars = ['Bz'] (Y, Ytest) = choose(yvars, df, test) #(Y,", "#fs.fit(X, Y) #print xvars #(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) #print(\"linear R^2[sample]", "= 'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df, test, lr, la, xvars_full, xvars,", "df = run_analysis(sample_file, test_set[i]) result = result.append(df, ignore_index=True) write_header(result) def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',", "regression R^2 [test data] : %.8f\" % test_score) return pd.DataFrame( { \"xvars\": [xvars],", "= 1 if 'tof' in id: r_bin = 2 if 'tofext' in id:", "return 0.005 else: raise AssertionError('Unknown field strengh: %s' % path) # ['x', 'y',", "except x, y, z upto 3 dims trace(\"reading training samples... 
\" + sample_file)", "precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % (row['precision'], row['max_dev'][i], row['max%'][i],", "'xy', 'xx', 'z', 'y', 'xz', 'yzz'] yvars = ['Bx', 'By', 'Bz'] #yvars =", "regression R^2 [train data]: %.8f\" % sample_score) trace(\"linear regression R^2 [test data] :", ".replace('-', '_') def get_location_by_volume_id(id): if 'its' in id: r_bin = 0 if 'tpc'", "df2): X1 = df1.loc[:, vars].as_matrix() X2 = df2.loc[:, vars].as_matrix() return (X1, X2) #", "fs.ranking_ }) trace(y) trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(', '.join(yvars) + \"", "absolute_axis: df['X'] = np.abs(df.x) df['Y'] = np.abs(df.y) df['Z'] = np.abs(df.z) for var in", "load_samples(sample_file, genvars=xvars_full) trace(\"reading test samples...\" + test_file) test = load_samples(test_file, genvars=xvars_full) trace(\"linear regression", "verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\", \"yy\", \"zz\", 'x', 'y', 'z', 'xzz',", "np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x) df['Br'] = df.Bt * np.cos(df.Bpsi) df['Bp'] = df.Bt", "'.join(arr) + \" }\") # print(\"deviation stat [test]: max %.2e (%.1f%%) avg %.2e", "\"volume_id\": [volume_id_from_path(sample_file)] }) def volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat', '')\\ .replace('-', '_') def get_location_by_volume_id(id):", "yvar in enumerate(row.yvars): name = row.volume_id #+ '_' + yvar.lower() print(\"# precision: tgt", "# \"tofext2k_z0_q4\" -> 0 if 'q1' in id: quadrant = 0 if 'q2'", "deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for i in range(len(yvars)): # arr = [lr.intercept_[i]] + lr.coef_[i]", "Ytest) = choose(yvars, df, test) #(Y, Ytest) = (df['Bz'], test['Bz']) xvars = combinatrial_vars('xyz',", "< tofext < 423 < cal < 500\") print(\"# barrel z: -550 <", "#xvars = [\"xx\", \"yy\", \"zz\", 'x', 'y', 'z', 'xzz', 'yzz'] #xvars = ['xxxr',", "(max_dev, max_pct, avg_dev, avg_pct) # IO Df def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols", "'Bz'] df = pd.read_csv(path, sep=' ', names=sample_cols) if cylindrical_axis: df['r'] = np.sqrt(df.x**2 +", "lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result precision = prec_from_pathname(sample_file)", "#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr'] #xvars=['x',", "[test data] : %.8f\" % test_score) return pd.DataFrame( { \"xvars\": [xvars], \"yvars\": [yvars],", "if 'q4' in id: quadrant = 3 return r_bin, z_bin, quadrant def write_header(result):", "pd.read_csv(path, sep=' ', names=sample_cols) if cylindrical_axis: df['r'] = np.sqrt(df.x**2 + df.y**2) df['p'] =", "X2) # IO () def run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set,", "'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr'] #xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx',", "(lr.score(X, Y), lr.score(Xtest, Ytest)) trace(\"linear regression R^2 [train data]: %.8f\" % sample_score) trace(\"linear", "print(\"# barrel r: 0 < its < 80 < tpc < 250 <", "'y', 'z', 'Bx', 'By', 'Bz'] df = pd.read_csv(path, sep=' ', names=sample_cols) if cylindrical_axis:", "...] 
def combinatrial_vars(vars_str='xyz', length=3): term_list = [] for l in range(length): term_list.extend([''.join(v) for", "genvars: df[var] = term(df, var) return df def choose(vars, df1, df2): X1 =", "data]: %.8f\" % sample_score) trace(\"linear regression R^2 [test data] : %.8f\" % test_score)", "'_') def get_location_by_volume_id(id): if 'its' in id: r_bin = 0 if 'tpc' in", "lr = sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr, 1,", "= deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for i in range(len(yvars)): # arr = [lr.intercept_[i]] +", "* 100, avg_dev / prec * 100) return (max_dev, max_pct, avg_dev, avg_pct) #", ")) (sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) trace(\"linear regression R^2 [train data]:", "for i, yvar in enumerate(row.yvars): name = row.volume_id #+ '_' + yvar.lower() print(\"#", "test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] )) (sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) trace(\"linear", "print(\"\") for index, row in result.iterrows(): #print(\"// ** %s - R^2 %s\" %", "'_' + yvar.lower() print(\"# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)\"", "This file was generated from sysid.py at \" + str(datetime.datetime.today())) print(\"# \" +", "= pd.read_csv(path, sep=' ', names=sample_cols) if cylindrical_axis: df['r'] = np.sqrt(df.x**2 + df.y**2) df['p']", "= (lr.score(X, Y), lr.score(Xtest, Ytest)) trace(\"linear regression R^2 [train data]: %.8f\" % sample_score)", "id: r_bin = 3 if 'cal' in id: r_bin = 4 z_bin =", "< q2 < pi < q3 < 1.5pi < q4 < 2pi\") print(\"#", "r_bin = 4 z_bin = int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" -> 0 if 'q1' in", "< 0.5pi < q2 < pi < q3 < 1.5pi < q4 <", "use all terms upto 3rd power (X, Xtest) = choose(xvars, df, test) for", "#print(\"// ** %s - R^2 %s\" % (row.volume_id, row.score)) print(\"#\" + row.volume_id) r_bin,", "for i, sample_file in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" % (sample_file, test_set[i])) df = run_analysis(sample_file,", "glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set) == len(test_set) and len(sample_set) > 0) result = pd.DataFrame()", "> 0) result = pd.DataFrame() for i, sample_file in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" %", "#xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(', '.join(yvars) + \" = 1 + \" + '", "1 + \" + ' + '.join(result.iloc[0].xvars)) print(\"# barrel r: 0 < its", "data: Nval_per_compoment x floats\") #print(\"# R^2: coefficient of determination in multiple linear regression.", "pi < q3 < 1.5pi < q4 < 2pi\") print(\"# header: Rbin Zbin", "range(len(yvars)): # print(yvars[i]) # print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\" % lr.intercept_[i])", "Ytest, prec=precision) #print(\"[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % sample_dev) #print(\"[test] max", "'x', 'y', 'z', 'xzz', 'yzz'] #xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr',", "q4 < 2pi\") print(\"# header: Rbin Zbin Quadrant Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment x", "path) # ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...] 
def combinatrial_vars(vars_str='xyz', length=3):", "(%.1f%%) avg %.2e (%.1f%%)\" % (row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef = [row['intercept'][i]]", "np.sqrt(df.Bx**2 + df.By**2) df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x) df['Br'] = df.Bt", "print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for i in range(10): # for xvars in combinations(xvars_full, i+1):", "#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision) #test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest),", "combinations, combinations_with_replacement from glob import glob from math import * import operator from", "'y', 'xz', 'yzz'] yvars = ['Bx', 'By', 'Bz'] #yvars = ['Bz'] (Y, Ytest)", "* np.sin(df.Bpsi) if absolute_axis: df['X'] = np.abs(df.x) df['Y'] = np.abs(df.y) df['Z'] = np.abs(df.z)", "test_file) test = load_samples(test_file, genvars=xvars_full) trace(\"linear regression fit...\") lr = sklearn.linear_model.LinearRegression() #ri =", "\"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\": [sample_score], \"score\": [test_score], \"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\":", "xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power (X, Xtest)", "sample_score) #print(\"linear R^2[test] %.8f\" % test_score) #(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))", "%s' % path) # ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...] def", "IO () def run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set)", "as plt import numpy as np import pandas as pd import sklearn.linear_model import", "debug = True # enable trace def trace(x): global debug if debug: print(x)", "df['Z'] = np.abs(df.z) for var in genvars: df[var] = term(df, var) return df", "test_set[i]) result = result.append(df, ignore_index=True) write_header(result) def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):", "= int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" -> 0 if 'q1' in id: quadrant = 0", "+ '.join(result.iloc[0].xvars)) print(\"# barrel r: 0 < its < 80 < tpc <", "% test_dev ) #print(\"lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % test_dev2", "20\" % (r_bin, z_bin, quadrant)) for i, yvar in enumerate(row.yvars): name = row.volume_id", "% (r_bin, z_bin, quadrant)) for i, yvar in enumerate(row.yvars): name = row.volume_id #+", ".replace('.sample.dat', '')\\ .replace('-', '_') def get_location_by_volume_id(id): if 'its' in id: r_bin = 0", "xs, 1) # foldl in Haskell # (XYZ, \"xx\") -> XX def term(dataframe,", "x in arr ] # print(yvars[i] + \" = { \" + ',", "\" + ', '.join(result.iloc[0].yvars) + \" = 1 + \" + ' +", "< 550\") print(\"# phi: 0 < q1 < 0.5pi < q2 < pi", "[\"xx\", \"yy\", \"zz\", 'x', 'y', 'z', 'xzz', 'yzz'] #xvars = ['xxxr', 'xrrX', 'zzrX',", "tpc < 250 < tof < 400 < tofext < 423 < cal", "-550 < z < 550\") print(\"# phi: 0 < q1 < 0.5pi <", "+ df.By**2) df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x) df['Br'] = df.Bt *", "enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" % (sample_file, test_set[i])) df = run_analysis(sample_file, test_set[i]) result = result.append(df,", "Ytest, ana_result precision = prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file)) xvars_full = 
combinatrial_vars('xyz', 3)[3:] #", "row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef = [row['intercept'][i]] + list(row['coeffs'][i]) arr = [ \"%.5e\" %", "= choose(xvars, df, test) #lr.fit(X, Y) #ri.fit(X, Y) #la.fit(X, Y) #fs.fit(X, Y) #print", "combinatrial_vars(vars_str='xyz', length=3): term_list = [] for l in range(length): term_list.extend([''.join(v) for v in", "for l in range(length): term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)]) return", "#print(\"linear R^2[test] %.8f\" % test_score) #(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest)) #print(\"lasso", "print(x) trace(\"loading...\") from itertools import combinations, combinations_with_replacement from glob import glob from math", "= prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file)) xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x,", "in yvars: fs.fit(X, df[y]) res = pd.DataFrame({ \"term\": xvars, \"rank\": fs.ranking_ }) trace(y)", "[lr.intercept_], \"sample_file\": [sample_file], \"test_file\": [test_file], \"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)] }) def volume_id_from_path(path): return", "[xvars], \"yvars\": [yvars], \"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\": [sample_score],", "combinations(xvars_full, i+1): #(X, Xtest) = choose(xvars, df, test) #lr.fit(X, Y) #ri.fit(X, Y) #la.fit(X,", "combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims trace(\"reading training", ": %.8f\" % test_score) return pd.DataFrame( { \"xvars\": [xvars], \"yvars\": [yvars], \"max_dev\": [test_dev[0]],", "df2.loc[:, vars].as_matrix() return (X1, X2) # IO () def run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\")", "int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" -> 0 if 'q1' in id: quadrant = 0 if", "+ \" + ' + '.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for i", "print(\"# This file was generated from sysid.py at \" + str(datetime.datetime.today())) print(\"# \"", "test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df, test, lr, la, xvars_full, xvars, yvars, X,", "in coef ] body = ' '.join(arr) #decl = \"const double[] %s =", "avg %.2e (%.1f%%)\" % # ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] )) (sample_score, test_score)", "'tof' in id: r_bin = 2 if 'tofext' in id: r_bin = 3", "df = load_samples(sample_file, genvars=xvars_full) trace(\"reading test samples...\" + test_file) test = load_samples(test_file, genvars=xvars_full)", "df, test) #lr.fit(X, Y) #ri.fit(X, Y) #la.fit(X, Y) #fs.fit(X, Y) #print xvars #(sample_score,", "z upto 3 dims trace(\"reading training samples... 
\" + sample_file) df = load_samples(sample_file,", "list(row['coeffs'][i]) arr = [ \"%.5e\" % x for x in coef ] body", "result = result.append(df, ignore_index=True) write_header(result) def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global", "(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct) = (max_dev / prec * 100,", "'')\\ .replace('-', '_') def get_location_by_volume_id(id): if 'its' in id: r_bin = 0 if", "global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest,", "\"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\" % lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X), Y, prec=precision) #test_dev", "= get_location_by_volume_id(row.volume_id) print(\"%s %s %s 20\" % (r_bin, z_bin, quadrant)) for i, yvar", "debug: print(x) trace(\"loading...\") from itertools import combinations, combinations_with_replacement from glob import glob from", "< 250 < tof < 400 < tofext < 423 < cal <", "avg dev, avg%) def deviation_stat(fX, Y, prec=0.005): dev = np.abs(fX - Y) (max_dev,", "return df def choose(vars, df1, df2): X1 = df1.loc[:, vars].as_matrix() X2 = df2.loc[:,", "glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set) == len(test_set) and len(sample_set) > 0)", "print(\"# data: Nval_per_compoment x floats\") #print(\"# R^2: coefficient of determination in multiple linear", "'xzrX', 'xxxz', 'xzzr'] #xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y',", "sample_file in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" % (sample_file, test_set[i])) df = run_analysis(sample_file, test_set[i]) result", "#decl = \"const double[] %s = { %s };\\n\" % (name, body) #print(decl)", "x: dataframe[x], list(vars_str))) # (f(X), Y) -> (max deviation, max%, avg dev, avg%)", "precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result", "%.8f\" % sample_score) #print(\"linear R^2[test] %.8f\" % test_score) #(sample_score2, test_score2) = (la.score(X, Y),", "= df.Bt * np.sin(df.Bpsi) if absolute_axis: df['X'] = np.abs(df.x) df['Y'] = np.abs(df.y) df['Z']", "q3 < 1.5pi < q4 < 2pi\") print(\"# header: Rbin Zbin Quadrant Nval_per_compoment(=20)\")", "= np.arctan2(df.y, df.x) df['Bt'] = np.sqrt(df.Bx**2 + df.By**2) df['Bpsi'] = np.arctan2(df.By, df.Bx) -", "%s = { %s };\\n\" % (name, body) #print(decl) print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields()", "\"%.5e\" % x for x in coef ] body = ' '.join(arr) #decl", "< q3 < 1.5pi < q4 < 2pi\") print(\"# header: Rbin Zbin Quadrant", "return reduce(operator.mul, xs, 1) # foldl in Haskell # (XYZ, \"xx\") -> XX", "run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df, test, lr, la, xvars_full,", "#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" %", "%.2e (%.1f%%) avg %.2e (%.1f%%)\" % (row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef =", "#print(\"[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % sample_dev) #print(\"[test] max %.2e (%.1f%%)", "= ['Bz'] (Y, Ytest) = choose(yvars, df, test) #(Y, Ytest) = (df['Bz'], test['Bz'])", "quadrant = 3 return r_bin, z_bin, quadrant def write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This", 
"#la = sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars =", "= [row['intercept'][i]] + list(row['coeffs'][i]) arr = [ \"%.5e\" % x for x in", "#ri.fit(X, Y) #la.fit(X, Y) #fs.fit(X, Y) #print xvars #(sample_score, test_score) = (lr.score(X, Y),", "% test_score) #(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest)) #print(\"lasso R^2[sample] %.8f\" %", "id: r_bin = 0 if 'tpc' in id: r_bin = 1 if 'tof'", "length=3): term_list = [] for l in range(length): term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str),", "# (XYZ, \"xx\") -> XX def term(dataframe, vars_str): return product(map(lambda x: dataframe[x], list(vars_str)))", "#write_header(run_analysis()) run_analysis_for_all_fields() #for i in range(10): # for xvars in combinations(xvars_full, i+1): #(X,", "{ \" + ', '.join(arr) + \" }\") # print(\"deviation stat [test]: max", "'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz'] yvars = ['Bx', 'By', 'Bz']", "df def choose(vars, df1, df2): X1 = df1.loc[:, vars].as_matrix() X2 = df2.loc[:, vars].as_matrix()", "', '.join(arr) + \" }\") # print(\"deviation stat [test]: max %.2e (%.1f%%) avg", "= 2 if 'tofext' in id: r_bin = 3 if 'cal' in id:", "glob from math import * import operator from os.path import basename import matplotlib.pyplot", "max%, avg dev, avg%) def deviation_stat(fX, Y, prec=0.005): dev = np.abs(fX - Y)", "str(datetime.datetime.today())) print(\"# \" + ', '.join(result.iloc[0].yvars) + \" = 1 + \" +", "strengh: %s' % path) # ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]", "(f(X), Y) -> (max deviation, max%, avg dev, avg%) def deviation_stat(fX, Y, prec=0.005):", "- np.arctan2(df.y, df.x) df['Br'] = df.Bt * np.cos(df.Bpsi) df['Bp'] = df.Bt * np.sin(df.Bpsi)", "in id: r_bin = 3 if 'cal' in id: r_bin = 4 z_bin", "res = pd.DataFrame({ \"term\": xvars, \"rank\": fs.ranking_ }) trace(y) trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term'])", "in path: return 0.002 elif '5k' in path: return 0.005 else: raise AssertionError('Unknown", "if debug: print(x) trace(\"loading...\") from itertools import combinations, combinations_with_replacement from glob import glob", "test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) trace(\"linear regression R^2 [train data]: %.8f\" %", "import sklearn.linear_model import sklearn.feature_selection import datetime def prec_from_pathname(path): if '2k' in path: return", "in id: r_bin = 2 if 'tofext' in id: r_bin = 3 if", "[precision], \"volume_id\": [volume_id_from_path(sample_file)] }) def volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat', '')\\ .replace('-', '_') def", "Xtest) = choose(xvars, df, test) #lr.fit(X, Y) #ri.fit(X, Y) #la.fit(X, Y) #fs.fit(X, Y)", "= sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV() fs = sklearn.feature_selection.RFE(lr, 1, verbose=0) #xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']", "= glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set) == len(test_set) and len(sample_set) >", "#result.to_html(\"magfield_params.html\") print(\"# This file was generated from sysid.py at \" + str(datetime.datetime.today())) print(\"#", "= np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x) df['Br'] = df.Bt * np.cos(df.Bpsi) df['Bp'] =", "as np import pandas as pd import sklearn.linear_model import sklearn.feature_selection 
import datetime def", "deviation_stat(fX, Y, prec=0.005): dev = np.abs(fX - Y) (max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))", "return (X1, X2) # IO () def run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set =", "prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file)) xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y,", "training samples... \" + sample_file) df = load_samples(sample_file, genvars=xvars_full) trace(\"reading test samples...\" +", "get_location_by_volume_id(id): if 'its' in id: r_bin = 0 if 'tpc' in id: r_bin", "Y) -> (max deviation, max%, avg dev, avg%) def deviation_stat(fX, Y, prec=0.005): dev", "avg_dev / prec * 100) return (max_dev, max_pct, avg_dev, avg_pct) # IO Df", "result.append(df, ignore_index=True) write_header(result) def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df,", "'xx', 'xy', 'xz', 'yy', ...] def combinatrial_vars(vars_str='xyz', length=3): term_list = [] for l", "== len(test_set) and len(sample_set) > 0) result = pd.DataFrame() for i, sample_file in", "upto 3 dims trace(\"reading training samples... \" + sample_file) df = load_samples(sample_file, genvars=xvars_full)", "% (row.volume_id, row.score)) print(\"#\" + row.volume_id) r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id) print(\"%s %s", "choose(xvars, df, test) for y in yvars: fs.fit(X, df[y]) res = pd.DataFrame({ \"term\":", "[row['intercept'][i]] + list(row['coeffs'][i]) arr = [ \"%.5e\" % x for x in coef", "sample_score2) #print(\"lasso R^2[test] %.8f\" % test_score2) #print(la.coef_) #for i in range(len(yvars)): # print(yvars[i])", "as pd import sklearn.linear_model import sklearn.feature_selection import datetime def prec_from_pathname(path): if '2k' in", "\"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\": [sample_file], \"test_file\": [test_file], \"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)] })", "return pd.DataFrame( { \"xvars\": [xvars], \"yvars\": [yvars], \"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]],", "at \" + str(datetime.datetime.today())) print(\"# \" + ', '.join(result.iloc[0].yvars) + \" = 1", "400 < tofext < 423 < cal < 500\") print(\"# barrel z: -550", "dataframe[x], list(vars_str))) # (f(X), Y) -> (max deviation, max%, avg dev, avg%) def", "= load_samples(test_file, genvars=xvars_full) trace(\"linear regression fit...\") lr = sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV() #la", "z_bin = int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\" -> 0 if 'q1' in id: quadrant =", "df.By**2) df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x) df['Br'] = df.Bt * np.cos(df.Bpsi)", "< q4 < 2pi\") print(\"# header: Rbin Zbin Quadrant Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment", "['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr'] #xvars=['x', 'xzz', 'xyz',", "prec * 100, avg_dev / prec * 100) return (max_dev, max_pct, avg_dev, avg_pct)", "+ yvar.lower() print(\"# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)\" %", "< 400 < tofext < 423 < cal < 500\") print(\"# barrel z:", "arr = [ \"%.5e\" % x for x in coef ] body =", "z_bin, quadrant = get_location_by_volume_id(row.volume_id) print(\"%s %s %s 20\" % (r_bin, z_bin, quadrant)) for", "[test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % # ( test_dev[0][i], test_dev[1][i], 
test_dev[2][i],", "R^2 [test data] : %.8f\" % test_score) return pd.DataFrame( { \"xvars\": [xvars], \"yvars\":", "else: raise AssertionError('Unknown field strengh: %s' % path) # ['x', 'y', 'z', 'xx',", "+ sample_file) df = load_samples(sample_file, genvars=xvars_full) trace(\"reading test samples...\" + test_file) test =", "0) result = pd.DataFrame() for i, sample_file in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" % (sample_file,", "\"avg%\": [test_dev[3]], \"sample_score\": [sample_score], \"score\": [test_score], \"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\": [sample_file], \"test_file\":", "80 < tpc < 250 < tof < 400 < tofext < 423", "coef = [row['intercept'][i]] + list(row['coeffs'][i]) arr = [ \"%.5e\" % x for x", "path: return 0.002 elif '5k' in path: return 0.005 else: raise AssertionError('Unknown field", "max_pct, avg_dev, avg_pct) # IO Df def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols =", "#yvars = ['Bz'] (Y, Ytest) = choose(yvars, df, test) #(Y, Ytest) = (df['Bz'],", "np.abs(df.x) df['Y'] = np.abs(df.y) df['Z'] = np.abs(df.z) for var in genvars: df[var] =", "xvars, \"rank\": fs.ranking_ }) trace(y) trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(', '.join(yvars)", "pd.DataFrame() for i, sample_file in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" % (sample_file, test_set[i])) df =", "# print(yvars[i] + \" = { \" + ', '.join(arr) + \" }\")", "run_analysis(sample_file, test_set[i]) result = result.append(df, ignore_index=True) write_header(result) def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file =", "{ \"xvars\": [xvars], \"yvars\": [yvars], \"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]],", "id: r_bin = 1 if 'tof' in id: r_bin = 2 if 'tofext'", "%s - R^2 %s\" % (row.volume_id, row.score)) print(\"#\" + row.volume_id) r_bin, z_bin, quadrant", "(%.1f%%) avg %.2e (%.1f%%)\" % test_dev ) #print(\"lasso [test] max %.2e (%.1f%%) avg", "df, test) #(Y, Ytest) = (df['Bz'], test['Bz']) xvars = combinatrial_vars('xyz', 3) # use", "'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df, test, lr, la, xvars_full, xvars, yvars,", "\" }\") # print(\"deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)\" %", "barrel z: -550 < z < 550\") print(\"# phi: 0 < q1 <", "x, y, z upto 3 dims trace(\"reading training samples... 
\" + sample_file) df", "coef ] body = ' '.join(arr) #decl = \"const double[] %s = {", "def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat', test_file = 'dat_z22/tpc2k-z0-q2.test.dat'): global precision, df, test, lr, la,", "+ ', '.join(result.iloc[0].yvars) + \" = 1 + \" + ' + '.join(result.iloc[0].xvars))", "'q1' in id: quadrant = 0 if 'q2' in id: quadrant = 1", "Zbin Quadrant Nval_per_compoment(=20)\") print(\"# data: Nval_per_compoment x floats\") #print(\"# R^2: coefficient of determination", "r: 0 < its < 80 < tpc < 250 < tof <", "run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set) assert(len(sample_set) == len(test_set) and", "df1.loc[:, vars].as_matrix() X2 = df2.loc[:, vars].as_matrix() return (X1, X2) # IO () def", "= 2 if 'q4' in id: quadrant = 3 return r_bin, z_bin, quadrant", "in range(len(yvars)): # arr = [lr.intercept_[i]] + lr.coef_[i] # arr = [ str(x)", "\" + ' + '.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for i in", "Df def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols = ['x', 'y', 'z', 'Bx', 'By',", "global debug if debug: print(x) trace(\"loading...\") from itertools import combinations, combinations_with_replacement from glob", "def combinatrial_vars(vars_str='xyz', length=3): term_list = [] for l in range(length): term_list.extend([''.join(v) for v", "[test_dev[3]], \"sample_score\": [sample_score], \"score\": [test_score], \"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_], \"sample_file\": [sample_file], \"test_file\": [test_file],", "id: r_bin = 2 if 'tofext' in id: r_bin = 3 if 'cal'", "i in range(len(yvars)): # print(yvars[i]) # print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\"", "lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X), Y, prec=precision) #test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2 =", "Y, prec=precision) #test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample]", "# arr = [ str(x) for x in arr ] # print(yvars[i] +", "'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr'] #xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz',", "trace(\"linear regression R^2 [test data] : %.8f\" % test_score) return pd.DataFrame( { \"xvars\":", "100, avg_dev / prec * 100) return (max_dev, max_pct, avg_dev, avg_pct) # IO", "double[] %s = { %s };\\n\" % (name, body) #print(decl) print(body) print(\"\") #write_header(run_analysis())", "arr = [ str(x) for x in arr ] # print(yvars[i] + \"", "0 if 'tpc' in id: r_bin = 1 if 'tof' in id: r_bin", "cylindrical_axis=True, absolute_axis=True, genvars=[]): sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz'] df =", "] # print(yvars[i] + \" = { \" + ', '.join(arr) + \"", "import * import operator from os.path import basename import matplotlib.pyplot as plt import", "df.Bt * np.sin(df.Bpsi) if absolute_axis: df['X'] = np.abs(df.x) df['Y'] = np.abs(df.y) df['Z'] =", "#print(decl) print(body) print(\"\") #write_header(run_analysis()) run_analysis_for_all_fields() #for i in range(10): # for xvars in", "test) #(Y, Ytest) = (df['Bz'], test['Bz']) xvars = combinatrial_vars('xyz', 3) # use all", "write_header(result): #result.to_csv(\"magfield_params.csv\") #result.to_html(\"magfield_params.html\") print(\"# This file was generated from sysid.py at \" +", "in 
combinations_with_replacement(list(vars_str), 1 + l)]) return term_list # product :: a#* => [a]", "# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] )) (sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest,", "R^2 %s\" % (row.volume_id, row.score)) print(\"#\" + row.volume_id) r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)", "XX def term(dataframe, vars_str): return product(map(lambda x: dataframe[x], list(vars_str))) # (f(X), Y) ->", "(sample_file, test_set[i])) df = run_analysis(sample_file, test_set[i]) result = result.append(df, ignore_index=True) write_header(result) def run_analysis(sample_file", "['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\", \"yy\", \"zz\", 'x', 'y', 'z', 'xzz', 'yzz'] #xvars =", "= 1 + \" + ' + '.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)", "#print(\"linear R^2[sample] %.8f\" % sample_score) #print(\"linear R^2[test] %.8f\" % test_score) #(sample_score2, test_score2) =", "avg %.2e (%.1f%%)\" % (row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef = [row['intercept'][i]] +", "trace(\"linear regression fit...\") lr = sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV() #la = sklearn.linear_model.LassoCV() fs", "'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr'] #xvars=['x', 'xzz', 'xyz', 'yz', 'yy',", "(X, Xtest) = choose(xvars, df, test) for y in yvars: fs.fit(X, df[y]) res", "= pd.DataFrame({ \"term\": xvars, \"rank\": fs.ranking_ }) trace(y) trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X,", "print(\"# barrel z: -550 < z < 550\") print(\"# phi: 0 < q1", "\"rank\": fs.ranking_ }) trace(y) trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(', '.join(yvars) +", "Ytest)) trace(\"linear regression R^2 [train data]: %.8f\" % sample_score) trace(\"linear regression R^2 [test", "[sample_file], \"test_file\": [test_file], \"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)] }) def volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat',", "vars_str): return product(map(lambda x: dataframe[x], list(vars_str))) # (f(X), Y) -> (max deviation, max%,", "[0,1]\") print(\"\") for index, row in result.iterrows(): #print(\"// ** %s - R^2 %s\"", "= pd.DataFrame() for i, sample_file in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\" % (sample_file, test_set[i])) df", "= ['Bx', 'By', 'Bz'] #yvars = ['Bz'] (Y, Ytest) = choose(yvars, df, test)", "prec_from_pathname(test_file)) xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3", "variables except x, y, z upto 3 dims trace(\"reading training samples... 
\" +", "df['Br'] = df.Bt * np.cos(df.Bpsi) df['Bp'] = df.Bt * np.sin(df.Bpsi) if absolute_axis: df['X']", "\"term\": xvars, \"rank\": fs.ranking_ }) trace(y) trace(res.sort_values(by = \"rank\")) #xvars=list(res.sort_values(by=\"rank\")[:26]['term']) lr.fit(X, Y) trace(',", "'Bx', 'By', 'Bz'] df = pd.read_csv(path, sep=' ', names=sample_cols) if cylindrical_axis: df['r'] =", "prec_from_pathname(path): if '2k' in path: return 0.002 elif '5k' in path: return 0.005", "math import * import operator from os.path import basename import matplotlib.pyplot as plt", "dev = np.abs(fX - Y) (max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct) =", "['Bz'] (Y, Ytest) = choose(yvars, df, test) #(Y, Ytest) = (df['Bz'], test['Bz']) xvars", "= [ \"%.5e\" % x for x in coef ] body = '", "z: -550 < z < 550\") print(\"# phi: 0 < q1 < 0.5pi", "glob import glob from math import * import operator from os.path import basename", "np.sin(df.Bpsi) if absolute_axis: df['X'] = np.abs(df.x) df['Y'] = np.abs(df.y) df['Z'] = np.abs(df.z) for", "= 3 if 'cal' in id: r_bin = 4 z_bin = int(id.split('_')[1][1:]) #", "def choose(vars, df1, df2): X1 = df1.loc[:, vars].as_matrix() X2 = df2.loc[:, vars].as_matrix() return", "3) # use all terms upto 3rd power (X, Xtest) = choose(xvars, df,", "= ' '.join(arr) #decl = \"const double[] %s = { %s };\\n\" %", "= \"const double[] %s = { %s };\\n\" % (name, body) #print(decl) print(body)", "R^2: coefficient of determination in multiple linear regression. [0,1]\") print(\"\") for index, row", "+ l)]) return term_list # product :: a#* => [a] -> a def", "R^2[sample] %.8f\" % sample_score2) #print(\"lasso R^2[test] %.8f\" % test_score2) #print(la.coef_) #for i in", "numpy as np import pandas as pd import sklearn.linear_model import sklearn.feature_selection import datetime", "avg %.2e (%.1f%%)\" % test_dev ) #print(\"lasso [test] max %.2e (%.1f%%) avg %.2e", "samples... 
\" + sample_file) df = load_samples(sample_file, genvars=xvars_full) trace(\"reading test samples...\" + test_file)", "yvar.lower() print(\"# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % (row['precision'],", "in range(10): # for xvars in combinations(xvars_full, i+1): #(X, Xtest) = choose(xvars, df,", "test = load_samples(test_file, genvars=xvars_full) trace(\"linear regression fit...\") lr = sklearn.linear_model.LinearRegression() #ri = sklearn.linear_model.RidgeCV()", "* np.cos(df.Bpsi) df['Bp'] = df.Bt * np.sin(df.Bpsi) if absolute_axis: df['X'] = np.abs(df.x) df['Y']", "np.sqrt(df.x**2 + df.y**2) df['p'] = np.arctan2(df.y, df.x) df['Bt'] = np.sqrt(df.Bx**2 + df.By**2) df['Bpsi']", "'q3' in id: quadrant = 2 if 'q4' in id: quadrant = 3", "sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz'] df = pd.read_csv(path, sep=' ',", "+ list(row['coeffs'][i]) arr = [ \"%.5e\" % x for x in coef ]", "Xtest) = choose(xvars, df, test) for y in yvars: fs.fit(X, df[y]) res =", "= deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample] max %.2e (%.1f%%)", "1 if 'tof' in id: r_bin = 2 if 'tofext' in id: r_bin", "Xtest, Ytest, ana_result precision = prec_from_pathname(sample_file) assert(precision == prec_from_pathname(test_file)) xvars_full = combinatrial_vars('xyz', 3)[3:]", "'xzzr'] #xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']", "%e\" % lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X), Y, prec=precision) #test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)", "= combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims trace(\"reading", "in range(len(yvars)): # print(yvars[i]) # print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\" %", "(X1, X2) # IO () def run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\")", "run_analysis_for_all_fields() #for i in range(10): # for xvars in combinations(xvars_full, i+1): #(X, Xtest)", "df.y**2) df['p'] = np.arctan2(df.y, df.x) df['Bt'] = np.sqrt(df.Bx**2 + df.By**2) df['Bpsi'] = np.arctan2(df.By,", "tof < 400 < tofext < 423 < cal < 500\") print(\"# barrel", "from sysid.py at \" + str(datetime.datetime.today())) print(\"# \" + ', '.join(result.iloc[0].yvars) + \"", "max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % (row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef", "= np.sqrt(df.Bx**2 + df.By**2) df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x) df['Br'] =", "'q2' in id: quadrant = 1 if 'q3' in id: quadrant = 2", "500\") print(\"# barrel z: -550 < z < 550\") print(\"# phi: 0 <", "#print(sample_set, test_set) assert(len(sample_set) == len(test_set) and len(sample_set) > 0) result = pd.DataFrame() for", "[yvars], \"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\": [sample_score], \"score\": [test_score],", "var in genvars: df[var] = term(df, var) return df def choose(vars, df1, df2):", "%.8f\" % test_score2) #print(la.coef_) #for i in range(len(yvars)): # print(yvars[i]) # print(pd.DataFrame({\"Name\": xvars,", "0 < its < 80 < tpc < 250 < tof < 400", "}) def volume_id_from_path(path): return basename(path)\\ .replace('.sample.dat', '')\\ .replace('-', '_') def get_location_by_volume_id(id): if 'its'", "#for i in range(len(yvars)): # print(yvars[i]) # 
print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+", "+ ' + '.join(result.iloc[0].xvars)) print(\"# barrel r: 0 < its < 80 <", "x floats\") #print(\"# R^2: coefficient of determination in multiple linear regression. [0,1]\") print(\"\")", "choose(vars, df1, df2): X1 = df1.loc[:, vars].as_matrix() X2 = df2.loc[:, vars].as_matrix() return (X1,", "[lr.intercept_[i]] + lr.coef_[i] # arr = [ str(x) for x in arr ]", "True # enable trace def trace(x): global debug if debug: print(x) trace(\"loading...\") from", "'2k' in path: return 0.002 elif '5k' in path: return 0.005 else: raise", "len(sample_set) > 0) result = pd.DataFrame() for i, sample_file in enumerate(sample_set): trace(\"run_analysis('%s', '%s')\"", "'xx', 'z', 'y', 'xz', 'yzz'] yvars = ['Bx', 'By', 'Bz'] #yvars = ['Bz']", "coefficient of determination in multiple linear regression. [0,1]\") print(\"\") for index, row in", "df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x) df['Br'] = df.Bt * np.cos(df.Bpsi) df['Bp']", "Y), lr.score(Xtest, Ytest)) #print(\"linear R^2[sample] %.8f\" % sample_score) #print(\"linear R^2[test] %.8f\" % test_score)", "enumerate(row.yvars): name = row.volume_id #+ '_' + yvar.lower() print(\"# precision: tgt %.2e max", "# print(\"deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % # (", "\"intercept\": [lr.intercept_], \"sample_file\": [sample_file], \"test_file\": [test_file], \"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)] }) def volume_id_from_path(path):", "# (f(X), Y) -> (max deviation, max%, avg dev, avg%) def deviation_stat(fX, Y,", "[train data]: %.8f\" % sample_score) trace(\"linear regression R^2 [test data] : %.8f\" %", "R^2 [train data]: %.8f\" % sample_score) trace(\"linear regression R^2 [test data] : %.8f\"", "= (lr.score(X, Y), lr.score(Xtest, Ytest)) #print(\"linear R^2[sample] %.8f\" % sample_score) #print(\"linear R^2[test] %.8f\"", "\"yvars\": [yvars], \"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\": [sample_score], \"score\":", "len(test_set) and len(sample_set) > 0) result = pd.DataFrame() for i, sample_file in enumerate(sample_set):", "+ lr.coef_[i] # arr = [ str(x) for x in arr ] #", "import numpy as np import pandas as pd import sklearn.linear_model import sklearn.feature_selection import", "AssertionError('Unknown field strengh: %s' % path) # ['x', 'y', 'z', 'xx', 'xy', 'xz',", "= (df['Bz'], test['Bz']) xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd", "get_location_by_volume_id(row.volume_id) print(\"%s %s %s 20\" % (r_bin, z_bin, quadrant)) for i, yvar in", "'xxxz', 'xzzr'] #xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz',", "power (X, Xtest) = choose(xvars, df, test) for y in yvars: fs.fit(X, df[y])", "lr.score(Xtest, Ytest)) trace(\"linear regression R^2 [train data]: %.8f\" % sample_score) trace(\"linear regression R^2", "print(\"# phi: 0 < q1 < 0.5pi < q2 < pi < q3", "prec=0.005): dev = np.abs(fX - Y) (max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct)", "return 0.002 elif '5k' in path: return 0.005 else: raise AssertionError('Unknown field strengh:", "# print(pd.DataFrame({\"Name\": xvars, \"Params\": lr.coef_[i]}).sort_values(by='Params')) # print(\"+ %e\" % lr.intercept_[i]) #sample_dev = deviation_stat(lr.predict(X),", "np.cos(df.Bpsi) df['Bp'] = df.Bt * np.sin(df.Bpsi) if absolute_axis: df['X'] = np.abs(df.x) df['Y'] =", 
"reduce(operator.mul, xs, 1) # foldl in Haskell # (XYZ, \"xx\") -> XX def", "', '.join(result.iloc[0].yvars) + \" = 1 + \" + ' + '.join(result.iloc[0].xvars)) print(\"#", "test_set[i])) df = run_analysis(sample_file, test_set[i]) result = result.append(df, ignore_index=True) write_header(result) def run_analysis(sample_file =", "prec=precision) #print(\"[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % sample_dev) #print(\"[test] max %.2e", "%.2e (%.1f%%)\" % (row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i])) coef = [row['intercept'][i]] + list(row['coeffs'][i])", "stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % # ( test_dev[0][i], test_dev[1][i],", "prec=precision) #test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision) #print(\"[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)\"", "% (sample_file, test_set[i])) df = run_analysis(sample_file, test_set[i]) result = result.append(df, ignore_index=True) write_header(result) def", "linear regression. [0,1]\") print(\"\") for index, row in result.iterrows(): #print(\"// ** %s -", "0.005 else: raise AssertionError('Unknown field strengh: %s' % path) # ['x', 'y', 'z',", "= df1.loc[:, vars].as_matrix() X2 = df2.loc[:, vars].as_matrix() return (X1, X2) # IO ()", "= ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz'] #xvars = [\"xx\", \"yy\", \"zz\", 'x', 'y', 'z', 'xzz', 'yzz'] #xvars", "in multiple linear regression. [0,1]\") print(\"\") for index, row in result.iterrows(): #print(\"// **", "print(\"# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % (row['precision'], row['max_dev'][i],", "#print xvars #(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest)) #print(\"linear R^2[sample] %.8f\" %", "%.2e (%.1f%%)\" % # ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] )) (sample_score, test_score) =", "'.join(arr) #decl = \"const double[] %s = { %s };\\n\" % (name, body)", "'.join(xvars)) test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision) #for i in range(len(yvars)): # arr =", "'tpc' in id: r_bin = 1 if 'tof' in id: r_bin = 2", "< tof < 400 < tofext < 423 < cal < 500\") print(\"#", "'%s')\" % (sample_file, test_set[i])) df = run_analysis(sample_file, test_set[i]) result = result.append(df, ignore_index=True) write_header(result)", "-> (max deviation, max%, avg dev, avg%) def deviation_stat(fX, Y, prec=0.005): dev =", "df.Bt * np.cos(df.Bpsi) df['Bp'] = df.Bt * np.sin(df.Bpsi) if absolute_axis: df['X'] = np.abs(df.x)", "% test_score) return pd.DataFrame( { \"xvars\": [xvars], \"yvars\": [yvars], \"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]],", "for index, row in result.iterrows(): #print(\"// ** %s - R^2 %s\" % (row.volume_id,", "import combinations, combinations_with_replacement from glob import glob from math import * import operator", "3 if 'cal' in id: r_bin = 4 z_bin = int(id.split('_')[1][1:]) # \"tofext2k_z0_q4\"", "range(length): term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)]) return term_list # product", "< 80 < tpc < 250 < tof < 400 < tofext <", "df, test) for y in yvars: fs.fit(X, df[y]) res = pd.DataFrame({ \"term\": xvars,", "\"sample_file\": [sample_file], \"test_file\": [test_file], \"precision\": [precision], \"volume_id\": [volume_id_from_path(sample_file)] }) def volume_id_from_path(path): return basename(path)\\", "quadrant = 1 if 'q3' in id: quadrant = 2 if 'q4' in", "regression. 
[0,1]\") print(\"\") for index, row in result.iterrows(): #print(\"// ** %s - R^2", "tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % (row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i],", "[ \"%.5e\" % x for x in coef ] body = ' '.join(arr)", "%.2e (%.1f%%)\" % test_dev ) #print(\"lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\"", "+ \" = { \" + ', '.join(arr) + \" }\") # print(\"deviation", "generated from sysid.py at \" + str(datetime.datetime.today())) print(\"# \" + ', '.join(result.iloc[0].yvars) +", "# enable trace def trace(x): global debug if debug: print(x) trace(\"loading...\") from itertools", "< 423 < cal < 500\") print(\"# barrel z: -550 < z <", "#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest)) #print(\"lasso R^2[sample] %.8f\" % sample_score2) #print(\"lasso", "# IO () def run_analysis_for_all_fields(): sample_set = glob(\"dat_z22/*2k*.sample.dat\") test_set = glob(\"dat_z22/*2k*.test.dat\") #print(sample_set, test_set)", "= (dev.max(axis=0), dev.mean(axis=0)) (max_pct, avg_pct) = (max_dev / prec * 100, avg_dev /", "avg %.2e (%.1f%%)\" % sample_dev) #print(\"[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" %", "test_score) return pd.DataFrame( { \"xvars\": [xvars], \"yvars\": [yvars], \"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\":", "[test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\": [sample_score], \"score\": [test_score], \"coeffs\": [lr.coef_], \"intercept\": [lr.intercept_],", "\"xvars\": [xvars], \"yvars\": [yvars], \"max_dev\": [test_dev[0]], \"max%\": [test_dev[1]], \"avg_dev\": [test_dev[2]], \"avg%\": [test_dev[3]], \"sample_score\":", "return basename(path)\\ .replace('.sample.dat', '')\\ .replace('-', '_') def get_location_by_volume_id(id): if 'its' in id: r_bin", "\" = 1 + \" + ' + '.join(result.iloc[0].xvars)) print(\"# barrel r: 0", "names=sample_cols) if cylindrical_axis: df['r'] = np.sqrt(df.x**2 + df.y**2) df['p'] = np.arctan2(df.y, df.x) df['Bt']", "'y', 'z', 'xzz', 'yzz'] #xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY',", "' + '.join(result.iloc[0].xvars)) print(\"# barrel r: 0 < its < 80 < tpc", "(%.1f%%)\" % sample_dev) #print(\"[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)\" % test_dev )", "%.2e (%.1f%%) avg %.2e (%.1f%%)\" % # ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))", "return (max_dev, max_pct, avg_dev, avg_pct) # IO Df def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):", "%s 20\" % (r_bin, z_bin, quadrant)) for i, yvar in enumerate(row.yvars): name =", "1.5pi < q4 < 2pi\") print(\"# header: Rbin Zbin Quadrant Nval_per_compoment(=20)\") print(\"# data:", "* 100) return (max_dev, max_pct, avg_dev, avg_pct) # IO Df def load_samples(path, cylindrical_axis=True,", "#for i in range(len(yvars)): # arr = [lr.intercept_[i]] + lr.coef_[i] # arr =", "of determination in multiple linear regression. [0,1]\") print(\"\") for index, row in result.iterrows():", "[a] -> a def product(xs): return reduce(operator.mul, xs, 1) # foldl in Haskell", "%s %s 20\" % (r_bin, z_bin, quadrant)) for i, yvar in enumerate(row.yvars): name", "'z', 'xzz', 'yzz'] #xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX',", "index, row in result.iterrows(): #print(\"// ** %s - R^2 %s\" % (row.volume_id, row.score))", "basename import matplotlib.pyplot as plt import numpy as np import pandas as pd" ]
[ "# Combine operations with centring allops = dif.fc.gen_symcen_ops(ops, cen) # Convert to magnetic", "Plotting cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction as dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif'", "magnetic symmetry magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) | %-40s' % ('Symmetry Operations', len(allops),", "dif.fc.gen_symcen_ops(ops, cen) # Convert to magnetic symmetry magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) |", "len(allops), 'Magnetic operations')) for n in range(len(allops)): print('%40s | %-40s' % (allops[n], magops[n]))", "cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):' % len(ops)) print(ops) print('Centring Operations (%d):' %", "as plt # Plotting cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction as dif", "(%d):' % len(cen)) print(cen) # Combine operations with centring allops = dif.fc.gen_symcen_ops(ops, cen)", "cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):' % len(ops)) print(ops) print('Centring Operations (%d):'", "sys,os import numpy as np import matplotlib.pyplot as plt # Plotting cf =", "\"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):'", "% len(ops)) print(ops) print('Centring Operations (%d):' % len(cen)) print(cen) # Combine operations with", "for n in range(len(allops)): print('%40s | %-40s' % (allops[n], magops[n])) sym, mag, tim", "(%d):' % len(ops)) print(ops) print('Centring Operations (%d):' % len(cen)) print(cen) # Combine operations", "Examples Load space groups and look at the information contained. \"\"\" import sys,os", "symmetry magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) | %-40s' % ('Symmetry Operations', len(allops), 'Magnetic", "= dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n in range(len(sym)): print('%40s | %+d | %-40s' %", "# Plotting cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction as dif f =", "plt # Plotting cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction as dif f", "cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction as dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f", "% len(cen)) print(cen) # Combine operations with centring allops = dif.fc.gen_symcen_ops(ops, cen) #", "= dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):' % len(ops))", "dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) | %-40s' % ('Symmetry Operations', len(allops), 'Magnetic operations')) for n", "print('\\ncif_symmetry') for n in range(len(sym)): print('%40s | %+d | %-40s' % (sym[n], tim[n],", "\"\"\" Dans_Diffraction Examples Load space groups and look at the information contained. \"\"\"", "len(ops)) print(ops) print('Centring Operations (%d):' % len(cen)) print(cen) # Combine operations with centring", "space groups and look at the information contained. 
\"\"\" import sys,os import numpy", "magops[n])) sym, mag, tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n in range(len(sym)): print('%40s |", "operations')) for n in range(len(allops)): print('%40s | %-40s' % (allops[n], magops[n])) sym, mag,", "Operations (%d):' % len(cen)) print(cen) # Combine operations with centring allops = dif.fc.gen_symcen_ops(ops,", "len(cen)) print(cen) # Combine operations with centring allops = dif.fc.gen_symcen_ops(ops, cen) # Convert", "Combine operations with centring allops = dif.fc.gen_symcen_ops(ops, cen) # Convert to magnetic symmetry", "print(cen) # Combine operations with centring allops = dif.fc.gen_symcen_ops(ops, cen) # Convert to", "% (allops[n], magops[n])) sym, mag, tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n in range(len(sym)):", "information contained. \"\"\" import sys,os import numpy as np import matplotlib.pyplot as plt", "cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):' % len(ops)) print(ops) print('Centring Operations (%d):' % len(cen)) print(cen)", "look at the information contained. \"\"\" import sys,os import numpy as np import", "f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry", "Convert to magnetic symmetry magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) | %-40s' % ('Symmetry", "'../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz']", "Operations', len(allops), 'Magnetic operations')) for n in range(len(allops)): print('%40s | %-40s' % (allops[n],", "contained. 
\"\"\" import sys,os import numpy as np import matplotlib.pyplot as plt #", "# Convert to magnetic symmetry magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) | %-40s' %", "os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction as dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\"", "with centring allops = dif.fc.gen_symcen_ops(ops, cen) # Convert to magnetic symmetry magops =", "Operations (%d):' % len(ops)) print(ops) print('Centring Operations (%d):' % len(cen)) print(cen) # Combine", "numpy as np import matplotlib.pyplot as plt # Plotting cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf,", "allops = dif.fc.gen_symcen_ops(ops, cen) # Convert to magnetic symmetry magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s", "= cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):' % len(ops)) print(ops) print('Centring Operations (%d):' % len(cen))", "import matplotlib.pyplot as plt # Plotting cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction", "= dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) | %-40s' % ('Symmetry Operations', len(allops), 'Magnetic operations')) for", "as np import matplotlib.pyplot as plt # Plotting cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..'))", "f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz'] cen", "'Magnetic operations')) for n in range(len(allops)): print('%40s | %-40s' % (allops[n], magops[n])) sym,", "range(len(allops)): print('%40s | %-40s' % (allops[n], magops[n])) sym, mag, tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry')", "n in range(len(allops)): print('%40s | %-40s' % (allops[n], magops[n])) sym, mag, tim =", "= dif.fc.gen_symcen_ops(ops, cen) # Convert to magnetic symmetry magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d)", "in range(len(allops)): print('%40s | %-40s' % (allops[n], magops[n])) sym, mag, tim = dif.fc.cif_symmetry(cif)", "print('%40s | %-40s' % (allops[n], magops[n])) sym, mag, tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for", "= cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):' % len(ops)) print(ops) print('Centring Operations", "matplotlib.pyplot as plt # Plotting cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction as", "%-40s' % ('Symmetry Operations', len(allops), 'Magnetic operations')) for n in range(len(allops)): print('%40s |", "and look at the information contained. \"\"\" import sys,os import numpy as np", "\"\"\" import sys,os import numpy as np import matplotlib.pyplot as plt # Plotting", "print('Centring Operations (%d):' % len(cen)) print(cen) # Combine operations with centring allops =", "| %-40s' % (allops[n], magops[n])) sym, mag, tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n", "the information contained. 
\"\"\" import sys,os import numpy as np import matplotlib.pyplot as", "import Dans_Diffraction as dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f)", "cif = dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):' %", "Load space groups and look at the information contained. \"\"\" import sys,os import", "| %-40s' % ('Symmetry Operations', len(allops), 'Magnetic operations')) for n in range(len(allops)): print('%40s", "magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) | %-40s' % ('Symmetry Operations', len(allops), 'Magnetic operations'))", "at the information contained. \"\"\" import sys,os import numpy as np import matplotlib.pyplot", "sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction as dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif", "dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz']", "Dans_Diffraction as dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f) ops", "cen) # Convert to magnetic symmetry magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) | %-40s'", "<gh_stars>10-100 \"\"\" Dans_Diffraction Examples Load space groups and look at the information contained.", "tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n in range(len(sym)): print('%40s | %+d | %-40s'", "('Symmetry Operations', len(allops), 'Magnetic operations')) for n in range(len(allops)): print('%40s | %-40s' %", "import sys,os import numpy as np import matplotlib.pyplot as plt # Plotting cf", "(%d) | %-40s' % ('Symmetry Operations', len(allops), 'Magnetic operations')) for n in range(len(allops)):", "mag, tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n in range(len(sym)): print('%40s | %+d |", "= \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations", "Dans_Diffraction Examples Load space groups and look at the information contained. 
\"\"\" import", "% ('Symmetry Operations', len(allops), 'Magnetic operations')) for n in range(len(allops)): print('%40s | %-40s'", "to magnetic symmetry magops = dif.fc.symmetry_ops2magnetic(allops) print('\\n%35s (%d) | %-40s' % ('Symmetry Operations',", "ops = cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):' % len(ops)) print(ops) print('Centring", "centring allops = dif.fc.gen_symcen_ops(ops, cen) # Convert to magnetic symmetry magops = dif.fc.symmetry_ops2magnetic(allops)", "'..')) import Dans_Diffraction as dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif =", "print('Symmetry Operations (%d):' % len(ops)) print(ops) print('Centring Operations (%d):' % len(cen)) print(cen) #", "sym, mag, tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n in range(len(sym)): print('%40s | %+d", "%-40s' % (allops[n], magops[n])) sym, mag, tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n in", "operations with centring allops = dif.fc.gen_symcen_ops(ops, cen) # Convert to magnetic symmetry magops", "groups and look at the information contained. \"\"\" import sys,os import numpy as", "dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n in range(len(sym)): print('%40s | %+d | %-40s' % (sym[n],", "dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz'] cen = cif['_space_group_symop_magn_centering.xyz'] print('Symmetry Operations (%d):' % len(ops)) print(ops)", "for n in range(len(sym)): print('%40s | %+d | %-40s' % (sym[n], tim[n], mag[n]))", "(allops[n], magops[n])) sym, mag, tim = dif.fc.cif_symmetry(cif) print('\\ncif_symmetry') for n in range(len(sym)): print('%40s", "np import matplotlib.pyplot as plt # Plotting cf = os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import", "= os.path.dirname(__file__) sys.path.insert(0,os.path.join(cf, '..')) import Dans_Diffraction as dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f =", "= '../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f) ops = cif['_space_group_symop_magn_operation.xyz'] cen =", "print(ops) print('Centring Operations (%d):' % len(cen)) print(cen) # Combine operations with centring allops", "print('\\n%35s (%d) | %-40s' % ('Symmetry Operations', len(allops), 'Magnetic operations')) for n in", "import numpy as np import matplotlib.pyplot as plt # Plotting cf = os.path.dirname(__file__)", "as dif f = '../Dans_Diffraction/Structures/LaMnO3.mcif' f = \"../Dans_Diffraction/Structures/Sr3LiRuO6_C2'c'.mcif\" cif = dif.readcif(f) ops =" ]
[ "from __future__ import unicode_literals import datetime from django.db import migrations, models class Migration(migrations.Migration):", "operations = [ migrations.AlterField( model_name='dates', name='startDate', field=models.DateField(default=datetime.datetime(2017, 5, 26, 10, 44, 7, 194576)),", "= [ ('distances', '0010_auto_20170519_1604'), ] operations = [ migrations.AlterField( model_name='dates', name='startDate', field=models.DateField(default=datetime.datetime(2017, 5,", "import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('distances',", "'0010_auto_20170519_1604'), ] operations = [ migrations.AlterField( model_name='dates', name='startDate', field=models.DateField(default=datetime.datetime(2017, 5, 26, 10, 44,", "datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('distances', '0010_auto_20170519_1604'),", "Migration(migrations.Migration): dependencies = [ ('distances', '0010_auto_20170519_1604'), ] operations = [ migrations.AlterField( model_name='dates', name='startDate',", "Generated by Django 1.11 on 2017-06-02 07:44 from __future__ import unicode_literals import datetime", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('distances', '0010_auto_20170519_1604'), ] operations", "Django 1.11 on 2017-06-02 07:44 from __future__ import unicode_literals import datetime from django.db", "utf-8 -*- # Generated by Django 1.11 on 2017-06-02 07:44 from __future__ import", "2017-06-02 07:44 from __future__ import unicode_literals import datetime from django.db import migrations, models", "dependencies = [ ('distances', '0010_auto_20170519_1604'), ] operations = [ migrations.AlterField( model_name='dates', name='startDate', field=models.DateField(default=datetime.datetime(2017,", "on 2017-06-02 07:44 from __future__ import unicode_literals import datetime from django.db import migrations,", "= [ migrations.AlterField( model_name='dates', name='startDate', field=models.DateField(default=datetime.datetime(2017, 5, 26, 10, 44, 7, 194576)), ),", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('distances', '0010_auto_20170519_1604'), ]", "('distances', '0010_auto_20170519_1604'), ] operations = [ migrations.AlterField( model_name='dates', name='startDate', field=models.DateField(default=datetime.datetime(2017, 5, 26, 10,", "# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-06-02 07:44", "migrations, models class Migration(migrations.Migration): dependencies = [ ('distances', '0010_auto_20170519_1604'), ] operations = [", "__future__ import unicode_literals import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies", "07:44 from __future__ import unicode_literals import datetime from django.db import migrations, models class", "class Migration(migrations.Migration): dependencies = [ ('distances', '0010_auto_20170519_1604'), ] operations = [ migrations.AlterField( model_name='dates',", "coding: utf-8 -*- # Generated by Django 1.11 on 2017-06-02 07:44 from __future__", "-*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-06-02 07:44 from", "<filename>distances/migrations/0011_auto_20170602_1044.py # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-06-02", "# Generated by Django 1.11 on 2017-06-02 07:44 from __future__ import unicode_literals import", "models class Migration(migrations.Migration): dependencies = [ 
('distances', '0010_auto_20170519_1604'), ] operations = [ migrations.AlterField(", "[ migrations.AlterField( model_name='dates', name='startDate', field=models.DateField(default=datetime.datetime(2017, 5, 26, 10, 44, 7, 194576)), ), ]", "-*- # Generated by Django 1.11 on 2017-06-02 07:44 from __future__ import unicode_literals", "1.11 on 2017-06-02 07:44 from __future__ import unicode_literals import datetime from django.db import", "unicode_literals import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "[ ('distances', '0010_auto_20170519_1604'), ] operations = [ migrations.AlterField( model_name='dates', name='startDate', field=models.DateField(default=datetime.datetime(2017, 5, 26,", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('distances', '0010_auto_20170519_1604'), ] operations =", "] operations = [ migrations.AlterField( model_name='dates', name='startDate', field=models.DateField(default=datetime.datetime(2017, 5, 26, 10, 44, 7,", "import unicode_literals import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "by Django 1.11 on 2017-06-02 07:44 from __future__ import unicode_literals import datetime from" ]
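This row is a Django migration whose AlterField bakes in a literal default, datetime.datetime(2017, 5, 26, 10, 44, 7, 194576): the timestamp of the makemigrations run, frozen forever. The usual intent is a per-row default, which Django supports by accepting a callable; a minimal model sketch (the Dates model and startDate field names come from the fragments, the fix itself is standard Django practice rather than anything this migration does):

    import datetime
    from django.db import models

    class Dates(models.Model):
        # Passing the callable (no parentheses) defers evaluation to each save,
        # instead of freezing one value at migration-generation time.
        startDate = models.DateField(default=datetime.date.today)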
[ "name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/', UserPostListView.as_view(), name='user-post'), path('post/comment/<int:pk>/delete/', views.CommentDeleteView.as_view(), name='comment-delete'),", "PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView, ) urlpatterns = [ path('',", "name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/', UserPostListView.as_view(), name='user-post'), path('post/comment/<int:pk>/delete/', views.CommentDeleteView.as_view(), name='comment-delete'), path('post/<int:pk>/like/', LikeView, name='like-post'),", "CommentDeleteView, UserPostListView, LikeView, ) urlpatterns = [ path('', PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'),", "PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView, ) urlpatterns = [ path('', PostListView.as_view(), name='index'),", "LikeView, ) urlpatterns = [ path('', PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(),", "path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/', UserPostListView.as_view(), name='user-post'), path('post/comment/<int:pk>/delete/', views.CommentDeleteView.as_view(), name='comment-delete'), path('post/<int:pk>/like/',", "import path from blog import views from blog.views import ( PostListView, PostDetailView, PostCreateView,", "from blog.views import ( PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView, )", "views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/', UserPostListView.as_view(), name='user-post'), path('post/comment/<int:pk>/delete/', views.CommentDeleteView.as_view(),", "name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/', UserPostListView.as_view(), name='user-post'),", "blog import views from blog.views import ( PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView,", "views from blog.views import ( PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView,", "blog.views import ( PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView, ) urlpatterns", "PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView, ) urlpatterns = [ path('', PostListView.as_view(),", "views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/', 
UserPostListView.as_view(), name='user-post'), path('post/comment/<int:pk>/delete/', views.CommentDeleteView.as_view(), name='comment-delete'), path('post/<int:pk>/like/', LikeView,", "import views from blog.views import ( PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView,", "path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/',", "path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/', UserPostListView.as_view(), name='user-post'), path('post/comment/<int:pk>/delete/', views.CommentDeleteView.as_view(), name='comment-delete'), path('post/<int:pk>/like/', LikeView, name='like-post'), ]", "PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView, ) urlpatterns = [ path('', PostListView.as_view(), name='index'), path('post/new/',", "= [ path('', PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(),", "path('', PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/',", ") urlpatterns = [ path('', PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'),", "name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'),", "views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/', UserPostListView.as_view(),", "[ path('', PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'),", "( PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView, ) urlpatterns = [", "UserPostListView, LikeView, ) urlpatterns = [ path('', PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/',", "django.urls import path from blog import views from blog.views import ( PostListView, PostDetailView,", "path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), 
path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'), path('user/<str:username>/', UserPostListView.as_view(), name='user-post'), path('post/comment/<int:pk>/delete/',", "urlpatterns = [ path('', PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/',", "PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'), path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', views.PostDeleteView.as_view(),", "path from blog import views from blog.views import ( PostListView, PostDetailView, PostCreateView, PostUpdateView,", "from blog import views from blog.views import ( PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView,", "import ( PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView, ) urlpatterns =", "from django.urls import path from blog import views from blog.views import ( PostListView,", "PostDeleteView, CommentDeleteView, UserPostListView, LikeView, ) urlpatterns = [ path('', PostListView.as_view(), name='index'), path('post/new/', views.PostCreateView.as_view()," ]
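The row above shingles a blog app's urls.py that routes to class-based views. A stripped-down sketch of the same wiring, assuming a blog.views module that actually defines these classes (names copied from the fragments):

    from django.urls import path
    from blog.views import PostListView, PostDetailView

    urlpatterns = [
        # as_view() turns a class-based view into the callable path() expects;
        # name= lets templates and reverse() refer to the route symbolically.
        path('', PostListView.as_view(), name='index'),
        path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
    ]

With this in place, reverse('post-detail', args=[42]) would resolve to /post/42/, assuming the module is included at the site root.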
[ "Y) = H(X) + H(Y) - H(X,Y) Where H(X), H(Y) and H(X,Y) are", "prob1_2) else: return (mutual_information, entropy1, entropy2, entropy1_2) def plot_2dhist(data, path, title): \"\"\" Helper", "proficiency (normalized mutual information) of an image pair \"\"\" alpha_data = load_image(alpha) beta_data", "displacement maps \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) cmp_map = load_map(cmp_map_path) pfire_map =", "marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+', ls='none', label=\"pFIRE\") plt.title(\"Map {} component, R^2={:0.3}\".format(dim, corr**2)) plt.legend()", "mi_start = calculate_proficiency(fixed_path, moved_path) mi_accepted = calculate_proficiency(fixed_path, accepted_path) mi_pfire = calculate_proficiency(fixed_path, pfire_path) mi_comparison", "fig_dir: image_rst.append(plot_2dhist( mi_start.hist, os.path.join(fig_dir, \"prereg.png\"), \"Fixed vs. Moved normalized mutual information: \" \"{:0.3f}\".format(mi_start.mi)))", "image_entries.append(\".. image:: {}\" \"\".format(os.path.basename(savepath))) except IndexError: break print(tabulate(table_entries, headers=\"firstrow\", tablefmt=\"grid\")) table = tabulate(table_entries,", "= H(X) + H(Y) - H(X,Y) Where H(X), H(Y) and H(X,Y) are the", "= calculate_proficiency(fixed_path, accepted_path) mi_pfire = calculate_proficiency(fixed_path, pfire_path) mi_comparison = calculate_proficiency(accepted_path, pfire_path) res_table =", "moved_path) mi_accepted = calculate_proficiency(fixed_path, accepted_path) mi_pfire = calculate_proficiency(fixed_path, pfire_path) mi_comparison = calculate_proficiency(accepted_path, pfire_path)", "cmpname='Accepted'): \"\"\"Compare ShIRT and pFIRE displacement maps \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True)", "os.path.join(fig_dir, \"pfire.png\"), \"pFIRE vs Fixed normalized mutual information: \" \"{:0.3f}\".format(mi_pfire.mi))) image_rst.append(plot_2dhist( mi_comparison.hist, os.path.join(fig_dir,", "plot_2dhist(data, path, title): \"\"\" Helper function to plot 2d histogram and return rst", "fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) else: fig_dir = os.path.normpath('.') mi_start = calculate_proficiency(fixed_path, moved_path) mi_accepted", "\" \"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist( mi_accepted.hist, os.path.join(fig_dir, \"accepted.png\"), \"{} vs. Fixed normalized mutual information: \"", "dimension:\", \"\")] image_entries = [] for didx, dim in enumerate(['X', 'Y', 'Z']): try:", "histogram and return rst inclusion command. \"\"\" plt.matshow(data, origin='lower', cmap='gray') plt.title(\"\\n\".join(wrap(title, 40))) plt.savefig(path)", "probabilities and the joint probability of the data. N.B it is assumed that", "- entropy1_2 if return_hist: return (mutual_information, entropy1, entropy2, entropy1_2, prob1_2) else: return (mutual_information,", "provided probability distribution Shannon Entropy is defined as $H(X) = \\sum_n p_X(x)\\log_2{p_X(x)}$ \"\"\"", "path, title): \"\"\" Helper function to plot 2d histogram and return rst inclusion", "\"\\n\".join(image_rst)) def compare_map_results(cmp_map_path, pfire_map_path, fig_dir=None, cmpname='Accepted'): \"\"\"Compare ShIRT and pFIRE displacement maps \"\"\"", "vs. 
{} normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_comparison.mi))) return (\"\\n\".join(rst_output), \"\\n\".join(image_rst)) def compare_map_results(cmp_map_path,", "and pFIRE displacement maps \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) cmp_map = load_map(cmp_map_path)", "Mathematical analysis functions for image and map comparison \"\"\" from collections import namedtuple", "'Y', 'Z']): try: corr = sps.linregress(cmp_map[didx].flatten(), pfire_map[didx].flatten())[2] table_entries.append((\"{}:\".format(dim), \"{:0.3}\".format(corr**2))) if fig_dir: savepath =", "datasets are independent. Returns a tuple of MI(X,Y), H(X), H(Y), H(X,Y) \"\"\" jointmax", "for didx, dim in enumerate(['X', 'Y', 'Z']): try: corr = sps.linregress(cmp_map[didx].flatten(), pfire_map[didx].flatten())[2] table_entries.append((\"{}:\".format(dim),", "information (proficiency):\", \"\"), (\"Fixed vs. Moved:\", \"{:.3f}\".format(mi_start.mi)), (\"{} vs. Fixed:\".format(cmpname), \"{:.3f}\".format(mi_accepted.mi)), (\"pFIRE vs.", "calculate probability density bin_edges = np.linspace(0, 1, num=resolution) prob1_2, _, _ = np.histogram2d(data1.flatten()/jointmax,", "\"{:0.3f}\".format(cmpname, mi_accepted.mi))) image_rst.append(plot_2dhist( mi_pfire.hist, os.path.join(fig_dir, \"pfire.png\"), \"pFIRE vs Fixed normalized mutual information: \"", "os.path.join(fig_dir, \"accepted.png\"), \"{} vs. Fixed normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_accepted.mi))) image_rst.append(plot_2dhist( mi_pfire.hist,", "vs. Moved normalized mutual information: \" \"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist( mi_accepted.hist, os.path.join(fig_dir, \"accepted.png\"), \"{} vs.", "def calculate_entropy(prob_dist): r\"\"\" Calculate Shannon entropy of the provided probability distribution Shannon Entropy", "as sps from tabulate import tabulate from .image_routines import load_image, load_map MIResult =", "it is assumed that the two datasets are independent. Returns a tuple of", "fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) cmp_map = load_map(cmp_map_path) pfire_map = load_map(pfire_map_path) table_entries = [(\"Map", "maps \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) cmp_map = load_map(cmp_map_path) pfire_map = load_map(pfire_map_path)", "normed_prob_dist = prob_dist[prob_dist > 0] normed_prob_dist /= normed_prob_dist.sum() entropy = -np.sum(normed_prob_dist * np.log2(normed_prob_dist))", "pfire_map = load_map(pfire_map_path) table_entries = [(\"Map coefficients of determination (R^2), by dimension:\", \"\")]", "40))) plt.savefig(path) plt.close() return \".. image:: {}\\n\".format(os.path.basename(path)) def calculate_proficiency(alpha, beta): \"\"\" Calculate proficiency", "vs. Fixed:\", \"{:.3f}\".format(mi_pfire.mi)), (\"pFIRE vs. {}:\".format(cmpname), \"{:.3f}\\n\".format(mi_comparison.mi))] print(tabulate(res_table, headers=\"firstrow\", tablefmt='grid') + \"\\n\") rst_output", "and return rst inclusion command. \"\"\" plt.matshow(data, origin='lower', cmap='gray') plt.title(\"\\n\".join(wrap(title, 40))) plt.savefig(path) plt.close()", "plt.legend() plt.savefig(savepath) plt.close() image_entries.append(\".. 
image:: {}\" \"\".format(os.path.basename(savepath))) except IndexError: break print(tabulate(table_entries, headers=\"firstrow\", tablefmt=\"grid\"))", "mode=0o755, exist_ok=True) cmp_map = load_map(cmp_map_path) pfire_map = load_map(pfire_map_path) table_entries = [(\"Map coefficients of", "corr**2)) plt.legend() plt.savefig(savepath) plt.close() image_entries.append(\".. image:: {}\" \"\".format(os.path.basename(savepath))) except IndexError: break print(tabulate(table_entries, headers=\"firstrow\",", "plt.title(\"Map {} component, R^2={:0.3}\".format(dim, corr**2)) plt.legend() plt.savefig(savepath) plt.close() image_entries.append(\".. image:: {}\" \"\".format(os.path.basename(savepath))) except", "normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_accepted.mi))) image_rst.append(plot_2dhist( mi_pfire.hist, os.path.join(fig_dir, \"pfire.png\"), \"pFIRE vs Fixed", "must be followed by blank line image_rst = [] if fig_dir: image_rst.append(plot_2dhist( mi_start.hist,", "image_rst.append(plot_2dhist( mi_accepted.hist, os.path.join(fig_dir, \"accepted.png\"), \"{} vs. Fixed normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_accepted.mi)))", "headers=\"firstrow\", tablefmt='grid') + \"\\n\") rst_output = [] rst_output.append(tabulate(res_table, headers=\"firstrow\", tablefmt=\"rst\")) rst_output.append(\"\") # table", "dim in enumerate(['X', 'Y', 'Z']): try: corr = sps.linregress(cmp_map[didx].flatten(), pfire_map[didx].flatten())[2] table_entries.append((\"{}:\".format(dim), \"{:0.3}\".format(corr**2))) if", "mi_start.hist, os.path.join(fig_dir, \"prereg.png\"), \"Fixed vs. Moved normalized mutual information: \" \"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist( mi_accepted.hist,", "and map comparison \"\"\" from collections import namedtuple from textwrap import wrap import", "calculate_proficiency(fixed_path, pfire_path) mi_comparison = calculate_proficiency(accepted_path, pfire_path) res_table = [(\"Normalized mutual information (proficiency):\", \"\"),", "res[-1]) def compare_image_results(fixed_path, moved_path, accepted_path, pfire_path, fig_dir=None, cmpname=\"accepted\"): \"\"\"Compare ShIRT and pFIRE registered", "\"\\n\") rst_output = [] rst_output.append(tabulate(res_table, headers=\"firstrow\", tablefmt=\"rst\")) rst_output.append(\"\") # table must be followed", "for image and map comparison \"\"\" from collections import namedtuple from textwrap import", "information: \" \"{:0.3f}\".format(mi_pfire.mi))) image_rst.append(plot_2dhist( mi_comparison.hist, os.path.join(fig_dir, \"comparison.png\"), \"pFIRE vs. {} normalized mutual information:", "map comparison \"\"\" from collections import namedtuple from textwrap import wrap import os", "res[2]) return MIResult(prof, res[-1]) def compare_image_results(fixed_path, moved_path, accepted_path, pfire_path, fig_dir=None, cmpname=\"accepted\"): \"\"\"Compare ShIRT", "cmpname=\"accepted\"): \"\"\"Compare ShIRT and pFIRE registered images \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True)", "MI(X,Y), H(X), H(Y), H(X,Y) \"\"\" jointmax = max(data1.max(), data2.max()) # First calculate probability", "rst inclusion command. \"\"\" plt.matshow(data, origin='lower', cmap='gray') plt.title(\"\\n\".join(wrap(title, 40))) plt.savefig(path) plt.close() return \"..", "entropy2, entropy1_2) def plot_2dhist(data, path, title): \"\"\" Helper function to plot 2d histogram", "(\"{} vs. Fixed:\".format(cmpname), \"{:.3f}\".format(mi_accepted.mi)), (\"pFIRE vs. 
Fixed:\", \"{:.3f}\".format(mi_pfire.mi)), (\"pFIRE vs. {}:\".format(cmpname), \"{:.3f}\\n\".format(mi_comparison.mi))] print(tabulate(res_table,", "else: fig_dir = os.path.normpath('.') mi_start = calculate_proficiency(fixed_path, moved_path) mi_accepted = calculate_proficiency(fixed_path, accepted_path) mi_pfire", "\"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+', ls='none', label=\"pFIRE\") plt.title(\"Map {} component, R^2={:0.3}\".format(dim,", "\"\"\"Compare ShIRT and pFIRE registered images \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) else:", "of an image pair \"\"\" alpha_data = load_image(alpha) beta_data = load_image(beta) res =", "(\"\\n\".join(rst_output), \"\\n\".join(image_rst)) def compare_map_results(cmp_map_path, pfire_map_path, fig_dir=None, cmpname='Accepted'): \"\"\"Compare ShIRT and pFIRE displacement maps", "return \".. image:: {}\\n\".format(os.path.basename(path)) def calculate_proficiency(alpha, beta): \"\"\" Calculate proficiency (normalized mutual information)", "plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+', ls='none', label=\"pFIRE\") plt.title(\"Map {} component, R^2={:0.3}\".format(dim, corr**2))", "is defined as $H(X) = \\sum_n p_X(x)\\log_2{p_X(x)}$ \"\"\" # First disregard all values", "res = calculate_mutual_information(alpha_data, beta_data, return_hist=True) prof = res[0]/min(res[1], res[2]) return MIResult(prof, res[-1]) def", "\"\"\" Mathematical analysis functions for image and map comparison \"\"\" from collections import", "+ H(Y) - H(X,Y) Where H(X), H(Y) and H(X,Y) are the Shannon entropies", "mutual_information = entropy1 + entropy2 - entropy1_2 if return_hist: return (mutual_information, entropy1, entropy2,", "mi_comparison = calculate_proficiency(accepted_path, pfire_path) res_table = [(\"Normalized mutual information (proficiency):\", \"\"), (\"Fixed vs.", "defined as: MI(X, Y) = H(X) + H(Y) - H(X,Y) Where H(X), H(Y)", "entropy1_2, prob1_2) else: return (mutual_information, entropy1, entropy2, entropy1_2) def plot_2dhist(data, path, title): \"\"\"", "\"\"\" plt.matshow(data, origin='lower', cmap='gray') plt.title(\"\\n\".join(wrap(title, 40))) plt.savefig(path) plt.close() return \".. image:: {}\\n\".format(os.path.basename(path)) def", "\"\"\"Compare ShIRT and pFIRE displacement maps \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) cmp_map", "registered images \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) else: fig_dir = os.path.normpath('.') mi_start", "of provided data. Mutual Information is defined as: MI(X, Y) = H(X) +", "entropies of the probabilities and the joint probability of the data. N.B it", "by dimension:\", \"\")] image_entries = [] for didx, dim in enumerate(['X', 'Y', 'Z']):", "to plot 2d histogram and return rst inclusion command. \"\"\" plt.matshow(data, origin='lower', cmap='gray')", "# First disregard all values where p_X == 0 to avoid nans from", "\" \"{:0.3f}\".format(mi_pfire.mi))) image_rst.append(plot_2dhist( mi_comparison.hist, os.path.join(fig_dir, \"comparison.png\"), \"pFIRE vs. 
{} normalized mutual information: \"", "return (mutual_information, entropy1, entropy2, entropy1_2) def plot_2dhist(data, path, title): \"\"\" Helper function to", "data2.max()) # First calculate probability density bin_edges = np.linspace(0, 1, num=resolution) prob1_2, _,", "if return_hist: return (mutual_information, entropy1, entropy2, entropy1_2, prob1_2) else: return (mutual_information, entropy1, entropy2,", "be followed by blank line image_rst = [] if fig_dir: image_rst.append(plot_2dhist( mi_start.hist, os.path.join(fig_dir,", "N.B it is assumed that the two datasets are independent. Returns a tuple", "def compare_image_results(fixed_path, moved_path, accepted_path, pfire_path, fig_dir=None, cmpname=\"accepted\"): \"\"\"Compare ShIRT and pFIRE registered images", "probability distribution Shannon Entropy is defined as $H(X) = \\sum_n p_X(x)\\log_2{p_X(x)}$ \"\"\" #", "\"{:.3f}\\n\".format(mi_comparison.mi))] print(tabulate(res_table, headers=\"firstrow\", tablefmt='grid') + \"\\n\") rst_output = [] rst_output.append(tabulate(res_table, headers=\"firstrow\", tablefmt=\"rst\")) rst_output.append(\"\")", "H(X), H(Y) and H(X,Y) are the Shannon entropies of the probabilities and the", "os.path.join(fig_dir, \"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+', ls='none', label=\"pFIRE\") plt.title(\"Map {} component,", "= max(data1.max(), data2.max()) # First calculate probability density bin_edges = np.linspace(0, 1, num=resolution)", "def compare_map_results(cmp_map_path, pfire_map_path, fig_dir=None, cmpname='Accepted'): \"\"\"Compare ShIRT and pFIRE displacement maps \"\"\" if", "os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) cmp_map = load_map(cmp_map_path) pfire_map = load_map(pfire_map_path) table_entries = [(\"Map coefficients", "= load_map(pfire_map_path) table_entries = [(\"Map coefficients of determination (R^2), by dimension:\", \"\")] image_entries", "prob2 = np.sum(prob1_2, axis=0) entropy1 = calculate_entropy(prob1) entropy2 = calculate_entropy(prob2) entropy1_2 = calculate_entropy(prob1_2)", "pfire_path) mi_comparison = calculate_proficiency(accepted_path, pfire_path) res_table = [(\"Normalized mutual information (proficiency):\", \"\"), (\"Fixed", "\"{:.3f}\".format(mi_pfire.mi)), (\"pFIRE vs. {}:\".format(cmpname), \"{:.3f}\\n\".format(mi_comparison.mi))] print(tabulate(res_table, headers=\"firstrow\", tablefmt='grid') + \"\\n\") rst_output = []", "jointmax = max(data1.max(), data2.max()) # First calculate probability density bin_edges = np.linspace(0, 1,", "r\"\"\" Calculate Shannon entropy of the provided probability distribution Shannon Entropy is defined", "(\"pFIRE vs. {}:\".format(cmpname), \"{:.3f}\\n\".format(mi_comparison.mi))] print(tabulate(res_table, headers=\"firstrow\", tablefmt='grid') + \"\\n\") rst_output = [] rst_output.append(tabulate(res_table,", "values where p_X == 0 to avoid nans from log(p_X) normed_prob_dist = prob_dist[prob_dist", "image_rst.append(plot_2dhist( mi_comparison.hist, os.path.join(fig_dir, \"comparison.png\"), \"pFIRE vs. 
{} normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_comparison.mi)))", ".image_routines import load_image, load_map MIResult = namedtuple(\"mi_result\", ['mi', 'hist']) def calculate_entropy(prob_dist): r\"\"\" Calculate", "sps.linregress(cmp_map[didx].flatten(), pfire_map[didx].flatten())[2] table_entries.append((\"{}:\".format(dim), \"{:0.3}\".format(corr**2))) if fig_dir: savepath = os.path.join(fig_dir, \"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none',", "#!/usr/bin/env python3 \"\"\" Mathematical analysis functions for image and map comparison \"\"\" from", "\"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) cmp_map = load_map(cmp_map_path) pfire_map = load_map(pfire_map_path) table_entries", "\"\"\" Helper function to plot 2d histogram and return rst inclusion command. \"\"\"", "['mi', 'hist']) def calculate_entropy(prob_dist): r\"\"\" Calculate Shannon entropy of the provided probability distribution", "Fixed normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_accepted.mi))) image_rst.append(plot_2dhist( mi_pfire.hist, os.path.join(fig_dir, \"pfire.png\"), \"pFIRE vs", "of the data. N.B it is assumed that the two datasets are independent.", "Information is defined as: MI(X, Y) = H(X) + H(Y) - H(X,Y) Where", "inclusion command. \"\"\" plt.matshow(data, origin='lower', cmap='gray') plt.title(\"\\n\".join(wrap(title, 40))) plt.savefig(path) plt.close() return \".. image::", "[] for didx, dim in enumerate(['X', 'Y', 'Z']): try: corr = sps.linregress(cmp_map[didx].flatten(), pfire_map[didx].flatten())[2]", "where p_X == 0 to avoid nans from log(p_X) normed_prob_dist = prob_dist[prob_dist >", "return_hist=True) prof = res[0]/min(res[1], res[2]) return MIResult(prof, res[-1]) def compare_image_results(fixed_path, moved_path, accepted_path, pfire_path,", "normalized mutual information: \" \"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist( mi_accepted.hist, os.path.join(fig_dir, \"accepted.png\"), \"{} vs. Fixed normalized", "\"\"\" from collections import namedtuple from textwrap import wrap import os import numpy", "H(Y) - H(X,Y) Where H(X), H(Y) and H(X,Y) are the Shannon entropies of", "independent. Returns a tuple of MI(X,Y), H(X), H(Y), H(X,Y) \"\"\" jointmax = max(data1.max(),", "image_rst.append(plot_2dhist( mi_start.hist, os.path.join(fig_dir, \"prereg.png\"), \"Fixed vs. Moved normalized mutual information: \" \"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist(", "{}:\".format(cmpname), \"{:.3f}\\n\".format(mi_comparison.mi))] print(tabulate(res_table, headers=\"firstrow\", tablefmt='grid') + \"\\n\") rst_output = [] rst_output.append(tabulate(res_table, headers=\"firstrow\", tablefmt=\"rst\"))", "entropy1_2 = calculate_entropy(prob1_2) mutual_information = entropy1 + entropy2 - entropy1_2 if return_hist: return", "coefficients of determination (R^2), by dimension:\", \"\")] image_entries = [] for didx, dim", "$H(X) = \\sum_n p_X(x)\\log_2{p_X(x)}$ \"\"\" # First disregard all values where p_X ==", "\" \"{:0.3f}\".format(cmpname, mi_accepted.mi))) image_rst.append(plot_2dhist( mi_pfire.hist, os.path.join(fig_dir, \"pfire.png\"), \"pFIRE vs Fixed normalized mutual information:", "import namedtuple from textwrap import wrap import os import numpy as np import", "beta): \"\"\" Calculate proficiency (normalized mutual information) of an image pair \"\"\" alpha_data", "of the probabilities and the joint probability of the data. 
N.B it is", "all values where p_X == 0 to avoid nans from log(p_X) normed_prob_dist =", "from collections import namedtuple from textwrap import wrap import os import numpy as", "accepted_path, pfire_path, fig_dir=None, cmpname=\"accepted\"): \"\"\"Compare ShIRT and pFIRE registered images \"\"\" if fig_dir:", "data. Mutual Information is defined as: MI(X, Y) = H(X) + H(Y) -", "tuple of MI(X,Y), H(X), H(Y), H(X,Y) \"\"\" jointmax = max(data1.max(), data2.max()) # First", "try: corr = sps.linregress(cmp_map[didx].flatten(), pfire_map[didx].flatten())[2] table_entries.append((\"{}:\".format(dim), \"{:0.3}\".format(corr**2))) if fig_dir: savepath = os.path.join(fig_dir, \"map_{}.png\".format(dim.lower()))", "pfire_map[didx].flatten())[2] table_entries.append((\"{}:\".format(dim), \"{:0.3}\".format(corr**2))) if fig_dir: savepath = os.path.join(fig_dir, \"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname)", "import wrap import os import numpy as np import matplotlib.pyplot as plt import", "\"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) else: fig_dir = os.path.normpath('.') mi_start = calculate_proficiency(fixed_path,", "\"accepted.png\"), \"{} vs. Fixed normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_accepted.mi))) image_rst.append(plot_2dhist( mi_pfire.hist, os.path.join(fig_dir,", "{}\\n\".format(os.path.basename(path)) def calculate_proficiency(alpha, beta): \"\"\" Calculate proficiency (normalized mutual information) of an image", "normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_comparison.mi))) return (\"\\n\".join(rst_output), \"\\n\".join(image_rst)) def compare_map_results(cmp_map_path, pfire_map_path, fig_dir=None,", "calculate_mutual_information(data1, data2, resolution=50, return_hist=False): r\"\"\" Calculate mutual information using Shannon entropy of provided", "entropy2 = calculate_entropy(prob2) entropy1_2 = calculate_entropy(prob1_2) mutual_information = entropy1 + entropy2 - entropy1_2", "mi_pfire.hist, os.path.join(fig_dir, \"pfire.png\"), \"pFIRE vs Fixed normalized mutual information: \" \"{:0.3f}\".format(mi_pfire.mi))) image_rst.append(plot_2dhist( mi_comparison.hist,", "Calculate mutual information using Shannon entropy of provided data. Mutual Information is defined", "prob1_2, _, _ = np.histogram2d(data1.flatten()/jointmax, data2.flatten()/jointmax, bins=bin_edges, density=True) prob1 = np.sum(prob1_2, axis=1) prob2", "enumerate(['X', 'Y', 'Z']): try: corr = sps.linregress(cmp_map[didx].flatten(), pfire_map[didx].flatten())[2] table_entries.append((\"{}:\".format(dim), \"{:0.3}\".format(corr**2))) if fig_dir: savepath", "of the provided probability distribution Shannon Entropy is defined as $H(X) = \\sum_n", "density bin_edges = np.linspace(0, 1, num=resolution) prob1_2, _, _ = np.histogram2d(data1.flatten()/jointmax, data2.flatten()/jointmax, bins=bin_edges,", "vs. {}:\".format(cmpname), \"{:.3f}\\n\".format(mi_comparison.mi))] print(tabulate(res_table, headers=\"firstrow\", tablefmt='grid') + \"\\n\") rst_output = [] rst_output.append(tabulate(res_table, headers=\"firstrow\",", "= np.histogram2d(data1.flatten()/jointmax, data2.flatten()/jointmax, bins=bin_edges, density=True) prob1 = np.sum(prob1_2, axis=1) prob2 = np.sum(prob1_2, axis=0)", "Moved:\", \"{:.3f}\".format(mi_start.mi)), (\"{} vs. Fixed:\".format(cmpname), \"{:.3f}\".format(mi_accepted.mi)), (\"pFIRE vs. Fixed:\", \"{:.3f}\".format(mi_pfire.mi)), (\"pFIRE vs. 
{}:\".format(cmpname),", "np import matplotlib.pyplot as plt import scipy.stats as sps from tabulate import tabulate", "calculate_proficiency(fixed_path, accepted_path) mi_pfire = calculate_proficiency(fixed_path, pfire_path) mi_comparison = calculate_proficiency(accepted_path, pfire_path) res_table = [(\"Normalized", "load_image(alpha) beta_data = load_image(beta) res = calculate_mutual_information(alpha_data, beta_data, return_hist=True) prof = res[0]/min(res[1], res[2])", "= calculate_proficiency(fixed_path, moved_path) mi_accepted = calculate_proficiency(fixed_path, accepted_path) mi_pfire = calculate_proficiency(fixed_path, pfire_path) mi_comparison =", "Shannon entropy of provided data. Mutual Information is defined as: MI(X, Y) =", "nans from log(p_X) normed_prob_dist = prob_dist[prob_dist > 0] normed_prob_dist /= normed_prob_dist.sum() entropy =", "pFIRE registered images \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) else: fig_dir = os.path.normpath('.')", "from textwrap import wrap import os import numpy as np import matplotlib.pyplot as", "plt.savefig(savepath) plt.close() image_entries.append(\".. image:: {}\" \"\".format(os.path.basename(savepath))) except IndexError: break print(tabulate(table_entries, headers=\"firstrow\", tablefmt=\"grid\")) table", "np.sum(prob1_2, axis=1) prob2 = np.sum(prob1_2, axis=0) entropy1 = calculate_entropy(prob1) entropy2 = calculate_entropy(prob2) entropy1_2", "= calculate_entropy(prob1_2) mutual_information = entropy1 + entropy2 - entropy1_2 if return_hist: return (mutual_information,", "data. N.B it is assumed that the two datasets are independent. Returns a", "<filename>benchmarking/pfire_benchmarking/analysis_routines.py #!/usr/bin/env python3 \"\"\" Mathematical analysis functions for image and map comparison \"\"\"", "the data. N.B it is assumed that the two datasets are independent. Returns", "tabulate import tabulate from .image_routines import load_image, load_map MIResult = namedtuple(\"mi_result\", ['mi', 'hist'])", "(\"pFIRE vs. Fixed:\", \"{:.3f}\".format(mi_pfire.mi)), (\"pFIRE vs. {}:\".format(cmpname), \"{:.3f}\\n\".format(mi_comparison.mi))] print(tabulate(res_table, headers=\"firstrow\", tablefmt='grid') + \"\\n\")", "vs. 
Fixed normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_accepted.mi))) image_rst.append(plot_2dhist( mi_pfire.hist, os.path.join(fig_dir, \"pfire.png\"), \"pFIRE", "= np.sum(prob1_2, axis=1) prob2 = np.sum(prob1_2, axis=0) entropy1 = calculate_entropy(prob1) entropy2 = calculate_entropy(prob2)", "'Z']): try: corr = sps.linregress(cmp_map[didx].flatten(), pfire_map[didx].flatten())[2] table_entries.append((\"{}:\".format(dim), \"{:0.3}\".format(corr**2))) if fig_dir: savepath = os.path.join(fig_dir,", "1, num=resolution) prob1_2, _, _ = np.histogram2d(data1.flatten()/jointmax, data2.flatten()/jointmax, bins=bin_edges, density=True) prob1 = np.sum(prob1_2,", "ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+', ls='none', label=\"pFIRE\") plt.title(\"Map {} component, R^2={:0.3}\".format(dim, corr**2)) plt.legend() plt.savefig(savepath)", "= [] for didx, dim in enumerate(['X', 'Y', 'Z']): try: corr = sps.linregress(cmp_map[didx].flatten(),", "H(X,Y) Where H(X), H(Y) and H(X,Y) are the Shannon entropies of the probabilities", "= calculate_mutual_information(alpha_data, beta_data, return_hist=True) prof = res[0]/min(res[1], res[2]) return MIResult(prof, res[-1]) def compare_image_results(fixed_path,", "\"{:0.3}\".format(corr**2))) if fig_dir: savepath = os.path.join(fig_dir, \"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+',", "as: MI(X, Y) = H(X) + H(Y) - H(X,Y) Where H(X), H(Y) and", "rst_output.append(\"\") # table must be followed by blank line image_rst = [] if", "ShIRT and pFIRE displacement maps \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) cmp_map =", "calculate_entropy(prob2) entropy1_2 = calculate_entropy(prob1_2) mutual_information = entropy1 + entropy2 - entropy1_2 if return_hist:", "ShIRT and pFIRE registered images \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) else: fig_dir", "disregard all values where p_X == 0 to avoid nans from log(p_X) normed_prob_dist", "density=True) prob1 = np.sum(prob1_2, axis=1) prob2 = np.sum(prob1_2, axis=0) entropy1 = calculate_entropy(prob1) entropy2", "load_map MIResult = namedtuple(\"mi_result\", ['mi', 'hist']) def calculate_entropy(prob_dist): r\"\"\" Calculate Shannon entropy of", "return_hist=False): r\"\"\" Calculate mutual information using Shannon entropy of provided data. Mutual Information", "= load_image(beta) res = calculate_mutual_information(alpha_data, beta_data, return_hist=True) prof = res[0]/min(res[1], res[2]) return MIResult(prof,", "\"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist( mi_accepted.hist, os.path.join(fig_dir, \"accepted.png\"), \"{} vs. 
Fixed normalized mutual information: \" \"{:0.3f}\".format(cmpname,", "of MI(X,Y), H(X), H(Y), H(X,Y) \"\"\" jointmax = max(data1.max(), data2.max()) # First calculate", "/= normed_prob_dist.sum() entropy = -np.sum(normed_prob_dist * np.log2(normed_prob_dist)) return entropy def calculate_mutual_information(data1, data2, resolution=50,", "= \\sum_n p_X(x)\\log_2{p_X(x)}$ \"\"\" # First disregard all values where p_X == 0", "plt import scipy.stats as sps from tabulate import tabulate from .image_routines import load_image,", "mi_accepted = calculate_proficiency(fixed_path, accepted_path) mi_pfire = calculate_proficiency(fixed_path, pfire_path) mi_comparison = calculate_proficiency(accepted_path, pfire_path) res_table", "MIResult(prof, res[-1]) def compare_image_results(fixed_path, moved_path, accepted_path, pfire_path, fig_dir=None, cmpname=\"accepted\"): \"\"\"Compare ShIRT and pFIRE", "normed_prob_dist.sum() entropy = -np.sum(normed_prob_dist * np.log2(normed_prob_dist)) return entropy def calculate_mutual_information(data1, data2, resolution=50, return_hist=False):", "a tuple of MI(X,Y), H(X), H(Y), H(X,Y) \"\"\" jointmax = max(data1.max(), data2.max()) #", "# First calculate probability density bin_edges = np.linspace(0, 1, num=resolution) prob1_2, _, _", "normed_prob_dist /= normed_prob_dist.sum() entropy = -np.sum(normed_prob_dist * np.log2(normed_prob_dist)) return entropy def calculate_mutual_information(data1, data2,", "{}\" \"\".format(os.path.basename(savepath))) except IndexError: break print(tabulate(table_entries, headers=\"firstrow\", tablefmt=\"grid\")) table = tabulate(table_entries, headers=\"firstrow\", tablefmt=\"rst\")", "if fig_dir: image_rst.append(plot_2dhist( mi_start.hist, os.path.join(fig_dir, \"prereg.png\"), \"Fixed vs. Moved normalized mutual information: \"", "rst_output.append(tabulate(res_table, headers=\"firstrow\", tablefmt=\"rst\")) rst_output.append(\"\") # table must be followed by blank line image_rst", "(proficiency):\", \"\"), (\"Fixed vs. Moved:\", \"{:.3f}\".format(mi_start.mi)), (\"{} vs. Fixed:\".format(cmpname), \"{:.3f}\".format(mi_accepted.mi)), (\"pFIRE vs. Fixed:\",", "analysis functions for image and map comparison \"\"\" from collections import namedtuple from", "is defined as: MI(X, Y) = H(X) + H(Y) - H(X,Y) Where H(X),", "an image pair \"\"\" alpha_data = load_image(alpha) beta_data = load_image(beta) res = calculate_mutual_information(alpha_data,", "blank line image_rst = [] if fig_dir: image_rst.append(plot_2dhist( mi_start.hist, os.path.join(fig_dir, \"prereg.png\"), \"Fixed vs.", "exist_ok=True) cmp_map = load_map(cmp_map_path) pfire_map = load_map(pfire_map_path) table_entries = [(\"Map coefficients of determination", "the Shannon entropies of the probabilities and the joint probability of the data.", "two datasets are independent. Returns a tuple of MI(X,Y), H(X), H(Y), H(X,Y) \"\"\"", "entropy of the provided probability distribution Shannon Entropy is defined as $H(X) =", "import tabulate from .image_routines import load_image, load_map MIResult = namedtuple(\"mi_result\", ['mi', 'hist']) def", "image_entries = [] for didx, dim in enumerate(['X', 'Y', 'Z']): try: corr =", "the two datasets are independent. 
def calculate_entropy(prob_dist):
    r""" Calculate Shannon entropy of the provided probability distribution

    Shannon entropy is defined as

    $H(X) = -\sum_x p_X(x)\log_2{p_X(x)}$
    """
    # First disregard all values where p_X == 0 to avoid nans from log(p_X),
    # then renormalize so the remaining masses sum to one
    normed_prob_dist = prob_dist[prob_dist > 0]
    normed_prob_dist /= normed_prob_dist.sum()

    entropy = -np.sum(normed_prob_dist * np.log2(normed_prob_dist))

    return entropy
def calculate_mutual_information(data1, data2, resolution=50, return_hist=False):
    r""" Calculate mutual information using Shannon entropy of provided data.

    Mutual Information is defined as:

    MI(X, Y) = H(X) + H(Y) - H(X,Y)

    where H(X), H(Y) and H(X,Y) are the Shannon entropies of the marginal
    probabilities and the joint probability of the data.

    N.B. it is assumed that the two datasets are independent.

    Returns a tuple of MI(X,Y), H(X), H(Y), H(X,Y)
    """
    jointmax = max(data1.max(), data2.max())
    # First calculate the joint probability density over a common grid
    bin_edges = np.linspace(0, 1, num=resolution)
    prob1_2, _, _ = np.histogram2d(data1.flatten()/jointmax,
                                   data2.flatten()/jointmax,
                                   bins=bin_edges, density=True)
    # Marginal distributions are the row and column sums of the joint
    prob1 = np.sum(prob1_2, axis=1)
    prob2 = np.sum(prob1_2, axis=0)

    entropy1 = calculate_entropy(prob1)
    entropy2 = calculate_entropy(prob2)
    entropy1_2 = calculate_entropy(prob1_2)

    mutual_information = entropy1 + entropy2 - entropy1_2

    if return_hist:
        return (mutual_information, entropy1, entropy2, entropy1_2, prob1_2)
    else:
        return (mutual_information, entropy1, entropy2, entropy1_2)
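# Illustrative check (assumed helper, not part of the original module):
# for identical inputs every sample lands on the diagonal of the joint
# histogram, so H(X,Y) = H(X) and the mutual information equals H(X).
def _mutual_information_example():
    rng = np.random.default_rng(0)
    data = rng.random(10000)
    mi, h1, _, h1_2 = calculate_mutual_information(data, data)
    assert np.isclose(h1_2, h1)
    assert np.isclose(mi, h1)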
image:: {}\\n\".format(os.path.basename(path)) def calculate_proficiency(alpha, beta): \"\"\" Calculate proficiency (normalized mutual information) of", "calculate_entropy(prob1_2) mutual_information = entropy1 + entropy2 - entropy1_2 if return_hist: return (mutual_information, entropy1,", "(R^2), by dimension:\", \"\")] image_entries = [] for didx, dim in enumerate(['X', 'Y',", "def calculate_mutual_information(data1, data2, resolution=50, return_hist=False): r\"\"\" Calculate mutual information using Shannon entropy of", "mutual information (proficiency):\", \"\"), (\"Fixed vs. Moved:\", \"{:.3f}\".format(mi_start.mi)), (\"{} vs. Fixed:\".format(cmpname), \"{:.3f}\".format(mi_accepted.mi)), (\"pFIRE", "[(\"Map coefficients of determination (R^2), by dimension:\", \"\")] image_entries = [] for didx,", "rst_output = [] rst_output.append(tabulate(res_table, headers=\"firstrow\", tablefmt=\"rst\")) rst_output.append(\"\") # table must be followed by", "entropy1_2 if return_hist: return (mutual_information, entropy1, entropy2, entropy1_2, prob1_2) else: return (mutual_information, entropy1,", "and pFIRE registered images \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) else: fig_dir =", "_ = np.histogram2d(data1.flatten()/jointmax, data2.flatten()/jointmax, bins=bin_edges, density=True) prob1 = np.sum(prob1_2, axis=1) prob2 = np.sum(prob1_2,", "information) of an image pair \"\"\" alpha_data = load_image(alpha) beta_data = load_image(beta) res", "determination (R^2), by dimension:\", \"\")] image_entries = [] for didx, dim in enumerate(['X',", "matplotlib.pyplot as plt import scipy.stats as sps from tabulate import tabulate from .image_routines", "= calculate_proficiency(accepted_path, pfire_path) res_table = [(\"Normalized mutual information (proficiency):\", \"\"), (\"Fixed vs. Moved:\",", "os.path.join(fig_dir, \"prereg.png\"), \"Fixed vs. Moved normalized mutual information: \" \"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist( mi_accepted.hist, os.path.join(fig_dir,", "[] if fig_dir: image_rst.append(plot_2dhist( mi_start.hist, os.path.join(fig_dir, \"prereg.png\"), \"Fixed vs. Moved normalized mutual information:", "== 0 to avoid nans from log(p_X) normed_prob_dist = prob_dist[prob_dist > 0] normed_prob_dist", "calculate_proficiency(accepted_path, pfire_path) res_table = [(\"Normalized mutual information (proficiency):\", \"\"), (\"Fixed vs. Moved:\", \"{:.3f}\".format(mi_start.mi)),", "load_map(pfire_map_path) table_entries = [(\"Map coefficients of determination (R^2), by dimension:\", \"\")] image_entries =", "as $H(X) = \\sum_n p_X(x)\\log_2{p_X(x)}$ \"\"\" # First disregard all values where p_X", "Helper function to plot 2d histogram and return rst inclusion command. \"\"\" plt.matshow(data,", "origin='lower', cmap='gray') plt.title(\"\\n\".join(wrap(title, 40))) plt.savefig(path) plt.close() return \".. 
image:: {}\\n\".format(os.path.basename(path)) def calculate_proficiency(alpha, beta):", "{} normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_comparison.mi))) return (\"\\n\".join(rst_output), \"\\n\".join(image_rst)) def compare_map_results(cmp_map_path, pfire_map_path,", "accepted_path) mi_pfire = calculate_proficiency(fixed_path, pfire_path) mi_comparison = calculate_proficiency(accepted_path, pfire_path) res_table = [(\"Normalized mutual", "-np.sum(normed_prob_dist * np.log2(normed_prob_dist)) return entropy def calculate_mutual_information(data1, data2, resolution=50, return_hist=False): r\"\"\" Calculate mutual", "provided data. Mutual Information is defined as: MI(X, Y) = H(X) + H(Y)", "calculate_entropy(prob_dist): r\"\"\" Calculate Shannon entropy of the provided probability distribution Shannon Entropy is", "np.log2(normed_prob_dist)) return entropy def calculate_mutual_information(data1, data2, resolution=50, return_hist=False): r\"\"\" Calculate mutual information using", "the probabilities and the joint probability of the data. N.B it is assumed", "np.linspace(0, 1, num=resolution) prob1_2, _, _ = np.histogram2d(data1.flatten()/jointmax, data2.flatten()/jointmax, bins=bin_edges, density=True) prob1 =", "image pair \"\"\" alpha_data = load_image(alpha) beta_data = load_image(beta) res = calculate_mutual_information(alpha_data, beta_data,", "pfire_path, fig_dir=None, cmpname=\"accepted\"): \"\"\"Compare ShIRT and pFIRE registered images \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir),", "mode=0o755, exist_ok=True) else: fig_dir = os.path.normpath('.') mi_start = calculate_proficiency(fixed_path, moved_path) mi_accepted = calculate_proficiency(fixed_path,", "Moved normalized mutual information: \" \"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist( mi_accepted.hist, os.path.join(fig_dir, \"accepted.png\"), \"{} vs. Fixed", "information: \" \"{:0.3f}\".format(cmpname, mi_accepted.mi))) image_rst.append(plot_2dhist( mi_pfire.hist, os.path.join(fig_dir, \"pfire.png\"), \"pFIRE vs Fixed normalized mutual", "= load_image(alpha) beta_data = load_image(beta) res = calculate_mutual_information(alpha_data, beta_data, return_hist=True) prof = res[0]/min(res[1],", "{} component, R^2={:0.3}\".format(dim, corr**2)) plt.legend() plt.savefig(savepath) plt.close() image_entries.append(\".. 
image:: {}\" \"\".format(os.path.basename(savepath))) except IndexError:", "= [(\"Map coefficients of determination (R^2), by dimension:\", \"\")] image_entries = [] for", "table must be followed by blank line image_rst = [] if fig_dir: image_rst.append(plot_2dhist(", "entropy1_2) def plot_2dhist(data, path, title): \"\"\" Helper function to plot 2d histogram and", "calculate_proficiency(fixed_path, moved_path) mi_accepted = calculate_proficiency(fixed_path, accepted_path) mi_pfire = calculate_proficiency(fixed_path, pfire_path) mi_comparison = calculate_proficiency(accepted_path,", "avoid nans from log(p_X) normed_prob_dist = prob_dist[prob_dist > 0] normed_prob_dist /= normed_prob_dist.sum() entropy", "load_map(cmp_map_path) pfire_map = load_map(pfire_map_path) table_entries = [(\"Map coefficients of determination (R^2), by dimension:\",", "if fig_dir: savepath = os.path.join(fig_dir, \"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+', ls='none',", "image and map comparison \"\"\" from collections import namedtuple from textwrap import wrap", "\"prereg.png\"), \"Fixed vs. Moved normalized mutual information: \" \"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist( mi_accepted.hist, os.path.join(fig_dir, \"accepted.png\"),", "ls='none', label=\"pFIRE\") plt.title(\"Map {} component, R^2={:0.3}\".format(dim, corr**2)) plt.legend() plt.savefig(savepath) plt.close() image_entries.append(\".. image:: {}\"", "in enumerate(['X', 'Y', 'Z']): try: corr = sps.linregress(cmp_map[didx].flatten(), pfire_map[didx].flatten())[2] table_entries.append((\"{}:\".format(dim), \"{:0.3}\".format(corr**2))) if fig_dir:", "fig_dir=None, cmpname=\"accepted\"): \"\"\"Compare ShIRT and pFIRE registered images \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755,", "images \"\"\" if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) else: fig_dir = os.path.normpath('.') mi_start =", "Calculate proficiency (normalized mutual information) of an image pair \"\"\" alpha_data = load_image(alpha)", "mi_accepted.hist, os.path.join(fig_dir, \"accepted.png\"), \"{} vs. Fixed normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_accepted.mi))) image_rst.append(plot_2dhist(", "\"\".format(os.path.basename(savepath))) except IndexError: break print(tabulate(table_entries, headers=\"firstrow\", tablefmt=\"grid\")) table = tabulate(table_entries, headers=\"firstrow\", tablefmt=\"rst\") return", "= os.path.join(fig_dir, \"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+', ls='none', label=\"pFIRE\") plt.title(\"Map {}", "comparison \"\"\" from collections import namedtuple from textwrap import wrap import os import", "(normalized mutual information) of an image pair \"\"\" alpha_data = load_image(alpha) beta_data =", "def calculate_proficiency(alpha, beta): \"\"\" Calculate proficiency (normalized mutual information) of an image pair", "= load_map(cmp_map_path) pfire_map = load_map(pfire_map_path) table_entries = [(\"Map coefficients of determination (R^2), by", "Fixed:\".format(cmpname), \"{:.3f}\".format(mi_accepted.mi)), (\"pFIRE vs. Fixed:\", \"{:.3f}\".format(mi_pfire.mi)), (\"pFIRE vs. {}:\".format(cmpname), \"{:.3f}\\n\".format(mi_comparison.mi))] print(tabulate(res_table, headers=\"firstrow\", tablefmt='grid')", "plt.savefig(path) plt.close() return \".. 
image:: {}\\n\".format(os.path.basename(path)) def calculate_proficiency(alpha, beta): \"\"\" Calculate proficiency (normalized", "mi_pfire = calculate_proficiency(fixed_path, pfire_path) mi_comparison = calculate_proficiency(accepted_path, pfire_path) res_table = [(\"Normalized mutual information", "tabulate from .image_routines import load_image, load_map MIResult = namedtuple(\"mi_result\", ['mi', 'hist']) def calculate_entropy(prob_dist):", "= entropy1 + entropy2 - entropy1_2 if return_hist: return (mutual_information, entropy1, entropy2, entropy1_2,", "axis=1) prob2 = np.sum(prob1_2, axis=0) entropy1 = calculate_entropy(prob1) entropy2 = calculate_entropy(prob2) entropy1_2 =", "# table must be followed by blank line image_rst = [] if fig_dir:", "the joint probability of the data. N.B it is assumed that the two", "axis=0) entropy1 = calculate_entropy(prob1) entropy2 = calculate_entropy(prob2) entropy1_2 = calculate_entropy(prob1_2) mutual_information = entropy1", "fig_dir: savepath = os.path.join(fig_dir, \"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+', ls='none', label=\"pFIRE\")", "if fig_dir: os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) cmp_map = load_map(cmp_map_path) pfire_map = load_map(pfire_map_path) table_entries =", "namedtuple from textwrap import wrap import os import numpy as np import matplotlib.pyplot", "joint probability of the data. N.B it is assumed that the two datasets", "2d histogram and return rst inclusion command. \"\"\" plt.matshow(data, origin='lower', cmap='gray') plt.title(\"\\n\".join(wrap(title, 40)))", "import matplotlib.pyplot as plt import scipy.stats as sps from tabulate import tabulate from", "\"Fixed vs. Moved normalized mutual information: \" \"{:0.3f}\".format(mi_start.mi))) image_rst.append(plot_2dhist( mi_accepted.hist, os.path.join(fig_dir, \"accepted.png\"), \"{}", "resolution=50, return_hist=False): r\"\"\" Calculate mutual information using Shannon entropy of provided data. Mutual", "compare_image_results(fixed_path, moved_path, accepted_path, pfire_path, fig_dir=None, cmpname=\"accepted\"): \"\"\"Compare ShIRT and pFIRE registered images \"\"\"", "IndexError: break print(tabulate(table_entries, headers=\"firstrow\", tablefmt=\"grid\")) table = tabulate(table_entries, headers=\"firstrow\", tablefmt=\"rst\") return (table, \"\\n\".join(image_entries))", "Returns a tuple of MI(X,Y), H(X), H(Y), H(X,Y) \"\"\" jointmax = max(data1.max(), data2.max())", "'hist']) def calculate_entropy(prob_dist): r\"\"\" Calculate Shannon entropy of the provided probability distribution Shannon", "entropy1, entropy2, entropy1_2) def plot_2dhist(data, path, title): \"\"\" Helper function to plot 2d", "res[0]/min(res[1], res[2]) return MIResult(prof, res[-1]) def compare_image_results(fixed_path, moved_path, accepted_path, pfire_path, fig_dir=None, cmpname=\"accepted\"): \"\"\"Compare", "os.path.join(fig_dir, \"comparison.png\"), \"pFIRE vs. 
{} normalized mutual information: \" \"{:0.3f}\".format(cmpname, mi_comparison.mi))) return (\"\\n\".join(rst_output),", "from tabulate import tabulate from .image_routines import load_image, load_map MIResult = namedtuple(\"mi_result\", ['mi',", "savepath = os.path.join(fig_dir, \"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(), marker='+', ls='none', label=\"pFIRE\") plt.title(\"Map", "data2.flatten()/jointmax, bins=bin_edges, density=True) prob1 = np.sum(prob1_2, axis=1) prob2 = np.sum(prob1_2, axis=0) entropy1 =", "beta_data = load_image(beta) res = calculate_mutual_information(alpha_data, beta_data, return_hist=True) prof = res[0]/min(res[1], res[2]) return", "mutual information) of an image pair \"\"\" alpha_data = load_image(alpha) beta_data = load_image(beta)", "- H(X,Y) Where H(X), H(Y) and H(X,Y) are the Shannon entropies of the", "Shannon entropy of the provided probability distribution Shannon Entropy is defined as $H(X)", "title): \"\"\" Helper function to plot 2d histogram and return rst inclusion command.", "table_entries.append((\"{}:\".format(dim), \"{:0.3}\".format(corr**2))) if fig_dir: savepath = os.path.join(fig_dir, \"map_{}.png\".format(dim.lower())) plt.plot(cmp_map[didx].flatten(), marker='x', ls='none', label=cmpname) plt.plot(pfire_map[didx].flatten(),", "H(X,Y) \"\"\" jointmax = max(data1.max(), data2.max()) # First calculate probability density bin_edges =", "return (\"\\n\".join(rst_output), \"\\n\".join(image_rst)) def compare_map_results(cmp_map_path, pfire_map_path, fig_dir=None, cmpname='Accepted'): \"\"\"Compare ShIRT and pFIRE displacement", "import os import numpy as np import matplotlib.pyplot as plt import scipy.stats as", "= [] rst_output.append(tabulate(res_table, headers=\"firstrow\", tablefmt=\"rst\")) rst_output.append(\"\") # table must be followed by blank", "p_X == 0 to avoid nans from log(p_X) normed_prob_dist = prob_dist[prob_dist > 0]", "H(Y), H(X,Y) \"\"\" jointmax = max(data1.max(), data2.max()) # First calculate probability density bin_edges", "sps from tabulate import tabulate from .image_routines import load_image, load_map MIResult = namedtuple(\"mi_result\",", "num=resolution) prob1_2, _, _ = np.histogram2d(data1.flatten()/jointmax, data2.flatten()/jointmax, bins=bin_edges, density=True) prob1 = np.sum(prob1_2, axis=1)", "np.histogram2d(data1.flatten()/jointmax, data2.flatten()/jointmax, bins=bin_edges, density=True) prob1 = np.sum(prob1_2, axis=1) prob2 = np.sum(prob1_2, axis=0) entropy1", "os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True) else: fig_dir = os.path.normpath('.') mi_start = calculate_proficiency(fixed_path, moved_path) mi_accepted =", "calculate_proficiency(alpha, beta): \"\"\" Calculate proficiency (normalized mutual information) of an image pair \"\"\"", "alpha_data = load_image(alpha) beta_data = load_image(beta) res = calculate_mutual_information(alpha_data, beta_data, return_hist=True) prof =", "= res[0]/min(res[1], res[2]) return MIResult(prof, res[-1]) def compare_image_results(fixed_path, moved_path, accepted_path, pfire_path, fig_dir=None, cmpname=\"accepted\"):", "are the Shannon entropies of the probabilities and the joint probability of the", "line image_rst = [] if fig_dir: image_rst.append(plot_2dhist( mi_start.hist, os.path.join(fig_dir, \"prereg.png\"), \"Fixed vs. 
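# Hedged usage sketch (all file names hypothetical): the two returned rst
# fragments can be written straight into a report page.
#
#   table_rst, images_rst = compare_image_results(
#       "fixed.mha", "moved.mha", "shirt_result.mha", "pfire_result.mha",
#       fig_dir="figures", cmpname="ShIRT")
#   with open("report.rst", "w") as fh:
#       fh.write(table_rst + "\n" + images_rst)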
def compare_map_results(cmp_map_path, pfire_map_path, fig_dir=None,
                        cmpname='Accepted'):
    """Compare ShIRT and pFIRE displacement maps
    """
    if fig_dir:
        os.makedirs(os.path.normpath(fig_dir), mode=0o755, exist_ok=True)

    cmp_map = load_map(cmp_map_path)
    pfire_map = load_map(pfire_map_path)

    table_entries = [("Map coefficients of determination (R^2), by dimension:",
                      "")]
    image_entries = []
    for didx, dim in enumerate(['X', 'Y', 'Z']):
        try:
            corr = sps.linregress(cmp_map[didx].flatten(),
                                  pfire_map[didx].flatten())[2]
            table_entries.append(("{}:".format(dim), "{:0.3}".format(corr**2)))
            if fig_dir:
                savepath = os.path.join(fig_dir,
                                        "map_{}.png".format(dim.lower()))
                plt.plot(cmp_map[didx].flatten(), marker='x', ls='none',
                         label=cmpname)
                plt.plot(pfire_map[didx].flatten(), marker='+', ls='none',
                         label="pFIRE")
                plt.title("Map {} component, R^2={:0.3}".format(dim, corr**2))
                plt.legend()
                plt.savefig(savepath)
                plt.close()
                image_entries.append(".. image:: {}"
                                     "".format(os.path.basename(savepath)))
        except IndexError:
            # A 2D registration has no Z component; stop at the first
            # missing dimension
            break

    print(tabulate(table_entries, headers="firstrow", tablefmt="grid"))

    table = tabulate(table_entries, headers="firstrow", tablefmt="rst")

    return (table, "\n".join(image_entries))
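# Illustrative check (assumed helper, not part of the original module):
# sps.linregress returns (slope, intercept, rvalue, pvalue, stderr), so
# index [2] is Pearson's r and squaring it gives the R^2 reported above.
def _r_squared_example():
    x = np.arange(10.0)
    r_value = sps.linregress(x, 2.0 * x + 1.0)[2]
    assert np.isclose(r_value**2, 1.0)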
[ "9, 0, 2 ], # 0x5F '_' [ 851, 3, 3, 5, 2,", "0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73, 0x32, 0x26, 0x64, 0x4C, 0xDE, 0x77, 0x39,", "12, 5, 0, -11 ], # 0x2F '/' [ 137, 9, 13, 9,", "514, 13, 12, 12, 0, -11 ], # 0x4B 'K' [ 534, 11,", "0x60 '`' [ 853, 9, 8, 9, 0, -7 ], # 0x61 'a'", "0x74 't' [ 1074, 8, 8, 9, 1, -7 ], # 0x75 'u'", "0x5A 'Z' [ 807, 7, 15, 7, 0, -11 ], # 0x5B '['", "], # 0x58 'X' [ 775, 10, 12, 11, 2, -11 ], #", "-11 ], # 0x5A 'Z' [ 807, 7, 15, 7, 0, -11 ],", "0x0C, 0x81, 0x90, 0x14, 0x03, 0x00, 0x60, 0x08, 0x00, 0xFB, 0xCE, 0x43, 0x0C,", "0x07, 0xC0, 0xF0, 0x1E, 0x06, 0xC0, 0xD8, 0x31, 0x04, 0x13, 0x01, 0x80, 0x70,", "'7' [ 245, 9, 13, 9, 1, -12 ], # 0x38 '8' [", "], # 0x3D '=' [ 301, 9, 9, 10, 1, -8 ], #", "0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26, 0x20, 0x99, 0x84, 0x3C, 0x03,", "0x01, 0x04, 0x00, 0x3E, 0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60, 0x06,", "], # 0x2B '+' [ 122, 2, 4, 5, 0, -1 ], #", "], # 0x21 '!' [ 6, 5, 4, 6, 3, -11 ], #", "4, 12, 5, 1, -11 ], # 0x6C 'l' [ 995, 13, 8,", "0xB9, 0x8E, 0x77, 0x3B, 0x33, 0x62, 0x62, 0x42, 0x4D, 0xCE, 0x0F, 0x18, 0xD8,", "0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39, 0x91, 0xCC,", "0x7F, 0xE9, 0x8E, 0x31, 0x04, 0x01, 0x80, 0x30, 0x06, 0x00, 0x80, 0x30, 0x06,", "0x08, 0x41, 0x04, 0x10, 0xC2, 0x08, 0x20, 0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24,", "'e' [ 905, 11, 17, 8, -1, -12 ], # 0x66 'f' [", "0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60,", "0x83, 0x86, 0x1C, 0x0C, 0x03, 0x80, 0x30, 0x07, 0x00, 0x80, 0xFF, 0x80, 0x00,", "12, 12, 13, 2, -11 ], # 0x55 'U' [ 717, 11, 12,", "# 0x5F '_' [ 851, 3, 3, 5, 2, -11 ], # 0x60", "[ 41, 14, 12, 15, 1, -11 ], # 0x25 '%' [ 62,", "0x10, 0x10, 0x37, 0x22, 0x24, 0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73, 0x32, 0x26,", "0x51 'Q' [ 650, 11, 12, 11, 0, -11 ], # 0x52 'R'", "'j' [ 977, 8, 12, 8, 0, -11 ], # 0x6B 'k' [", "0x27 ''' [ 81, 6, 15, 6, 1, -11 ], # 0x28 '('", "9, 1, -12 ], # 0x38 '8' [ 260, 9, 13, 9, 0,", "0x03, 0x01, 0x00, 0x0F, 0x84, 0x04, 0x03, 0x80, 0x60, 0x18, 0x0C, 0x06, 0x03,", "0x80, 0x1C, 0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0, 0xC5, 0x03, 0x34, 0xE7, 0xAE,", "9, 10, 1, -8 ], # 0x3E '>' [ 312, 7, 12, 8,", "-11 ], # 0x53 'S' [ 682, 11, 12, 11, 2, -11 ],", "0x38 '8' [ 260, 9, 13, 9, 0, -12 ], # 0x39 '9'", "0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11, 0x04, 0x83, 0x20, 0x1C, 0x1B, 0x99,", "0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08, 0x04, 0x02,", "0x25 '%' [ 62, 12, 12, 14, 1, -11 ], # 0x26 '&'", "0x91, 0xCC, 0x93, 0x3B, 0x0E, 0x00, 0x1F, 0x80, 0x01, 0x00, 0x60, 0x14, 0x04,", "0x10, 0x20, 0x40, 0x82, 0x04, 0x08, 0x1C, 0x00, 0x81, 0x04, 0x18, 0x20, 0xC1,", "# 0x36 '6' [ 231, 9, 12, 9, 1, -11 ], # 0x37", "# 0x2B '+' [ 122, 2, 4, 5, 0, -1 ], # 0x2C", "0xB0, 0x13, 0x02, 0x18, 0x61, 0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8, 0x34,", "0x08, 0x10, 0x20, 0x40, 0x72, 0x0E, 0x08, 0x61, 0x04, 0x30, 0x86, 0x08, 0x61,", "0x04, 0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F, 0x80, 0x3F,", "[ 514, 13, 12, 12, 0, -11 ], # 0x4B 'K' [ 534,", "12, 6, 0, -11 ], # 0x49 'I' [ 500, 9, 12, 8,", "0x87, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C,", "12, 13, 2, -11 ], # 0x55 'U' [ 717, 11, 12, 12,", "'3' [ 188, 9, 12, 9, 0, -11 ], # 0x34 '4' [", "0x78, 0x00, 0x07, 0x83, 0x18, 0xC1, 0x98, 0x36, 0x07, 0xC0, 0xF0, 0x1E, 0x06,", "0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39, 0x91, 0xCC, 0x93,", "0x6A 'j' [ 977, 8, 12, 8, 0, -11 ], # 0x6B 'k'", "0x18, 0x61, 0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8, 0x34, 0x05, 
0x01, 0x80,", "0x03, 0x06, 0x0C, 0x00, 0x33, 0x00, 0x00, 0xCC, 0x33, 0x00, 0x00, 0x44, 0x48,", "905, 11, 17, 8, -1, -12 ], # 0x66 'f' [ 929, 9,", "0x00, 0x18, 0x18, 0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8, 0x80, 0x1F,", "'`' [ 853, 9, 8, 9, 0, -7 ], # 0x61 'a' [", "8, 12, 1, -7 ], # 0x77 'w' [ 1100, 9, 8, 8,", "0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02, 0x06, 0x0C, 0x08,", "9, 12, 9, 1, -11 ], # 0x37 '7' [ 245, 9, 13,", "[ 790, 11, 12, 10, 0, -11 ], # 0x5A 'Z' [ 807,", "-11 ], # 0x2F '/' [ 137, 9, 13, 9, 1, -12 ],", "-11 ], # 0x62 'b' [ 876, 8, 8, 7, 0, -7 ],", "0x3C, 0x8C, 0x18, 0x30, 0xC3, 0x0C, 0x20, 0x40, 0x80, 0x06, 0x00, 0x0F, 0xC0,", "0x0C, 0x14, 0x14, 0x1C, 0x14, 0x2C, 0x16, 0x4C, 0x26, 0x48, 0x26, 0x98, 0x27,", "# 0x21 '!' [ 6, 5, 4, 6, 3, -11 ], # 0x22", "0x00, 0xC0, 0x83, 0x04, 0x08, 0x10, 0x60, 0x81, 0x02, 0x04, 0x70, 0x38, 0x10,", "0x80, 0x40, 0x40, 0x60, 0x20, 0x20, 0x10, 0x10, 0x18, 0x08, 0x00, 0x1E, 0x19,", "0x38, 0x78, 0x60, 0x83, 0x04, 0x2C, 0x41, 0x22, 0x09, 0x10, 0x4D, 0x84, 0x28,", "13, 9, 1, -12 ], # 0x31 '1' [ 162, 8, 12, 9,", "0xD8, 0x31, 0x8C, 0x1E, 0x00, 0x3F, 0xC1, 0x9C, 0x21, 0x8C, 0x31, 0x86, 0x31,", "0x20, 0x81, 0x02, 0x04, 0x10, 0x20, 0x40, 0x82, 0x04, 0x08, 0x1C, 0x00, 0x81,", "0, -11 ], # 0x33 '3' [ 188, 9, 12, 9, 0, -11", "], # 0x69 'i' [ 963, 7, 16, 5, -1, -11 ], #", "0x8C, 0x10, 0xE3, 0x87, 0xCE, 0xFA, 0x08, 0x21, 0x08, 0x61, 0x8C, 0x30, 0xC3,", "0x31, 0x8C, 0x3F, 0x04, 0xC1, 0x98, 0x31, 0x84, 0x31, 0x86, 0x78, 0x70, 0x1E,", "12, 12, 0, -11 ], # 0x58 'X' [ 775, 10, 12, 11,", "0x24, 0x1C, 0x50, 0x38, 0xA0, 0x21, 0x80, 0x42, 0x01, 0x04, 0x00, 0x3E, 0x71,", "0xC0, 0x60, 0x20, 0x30, 0x18, 0x0C, 0x04, 0x36, 0x1E, 0x00, 0x3E, 0x78, 0x61,", "# 0x7A 'z' [ 1132, 6, 15, 7, 1, -11 ], # 0x7B", "0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F, 0x80, 0x3F, 0xC1,", "], # 0x22 '\"' [ 9, 10, 12, 9, 0, -11 ], #", "0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x81,", "[ 468, 14, 12, 13, 0, -11 ], # 0x48 'H' [ 489,", "0xC3, 0x8C, 0x10, 0xE3, 0x87, 0xCE, 0xFA, 0x08, 0x21, 0x08, 0x61, 0x8C, 0x30,", "0x3F, 0xC1, 0x9C, 0x21, 0x8C, 0x31, 0x86, 0x31, 0x87, 0xE1, 0x80, 0x30, 0x04,", "[ 432, 12, 12, 10, 0, -11 ], # 0x46 'F' [ 450,", "0x50, 0x28, 0x18, 0x08, 0x08, 0x08, 0x18, 0x00, 0x3F, 0x42, 0x04, 0x08, 0x10,", "-7 ], # 0x3A ':' [ 279, 4, 10, 4, 1, -7 ],", "-7 ], # 0x76 'v' [ 1089, 11, 8, 12, 1, -7 ],", "], # 0x59 'Y' [ 790, 11, 12, 10, 0, -11 ], #", "0x1C, 0x50, 0x38, 0xA0, 0x21, 0x80, 0x42, 0x01, 0x04, 0x00, 0x3E, 0x71, 0x82,", "7, 12, 6, 0, -11 ], # 0x49 'I' [ 500, 9, 12,", "0x03, 0x02, 0x06, 0x04, 0x08, 0x08, 0x10, 0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E,", "0x7F, 0xE0, 0x1C, 0x07, 0x0C, 0x0E, 0x0C, 0x14, 0x14, 0x1C, 0x14, 0x2C, 0x16,", "0x00, 0x38, 0x08, 0x04, 0x02, 0x03, 0x39, 0x6C, 0xC6, 0x46, 0x63, 0x21, 0x11,", "0x24, 0x40, 0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82, 0x20, 0x98, 0x24, 0x7F, 0xC4,", "0x62, 0x42, 0x4D, 0xCE, 0x0F, 0x18, 0xD8, 0x7C, 0x3C, 0x3E, 0x1B, 0x18, 0xF0,", "0x78, 0x61, 0x82, 0x10, 0x31, 0x01, 0xB0, 0x0E, 0x00, 0x58, 0x06, 0x60, 0x33,", "0x01, 0x00, 0x60, 0x0C, 0x01, 0x00, 0x20, 0x04, 0x01, 0x00, 0xC0, 0x00, 0x1E,", "0xC7, 0xC1, 0x00, 0x80, 0x1C, 0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0, 0xC5, 0x03,", "0x08, 0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00,", "'/' [ 137, 9, 13, 9, 1, -12 ], # 0x30 '0' [", "8, 9, 0, -7 ], # 0x6F 'o' [ 1025, 10, 12, 8,", "0x0C, 0x03, 0x00, 0x40, 0x18, 0x06, 0x05, 0x81, 0x7F, 0xE0, 0x0E, 0x10, 0x20,", "-11 ], # 0x21 '!' 
[ 6, 5, 4, 6, 3, -11 ],", "# 0x3D '=' [ 301, 9, 9, 10, 1, -8 ], # 0x3E", "[ 629, 11, 15, 12, 1, -11 ], # 0x51 'Q' [ 650,", "0x18, 0xC1, 0xB0, 0x36, 0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C,", "0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02, 0x06, 0x0C, 0x08, 0x10, 0x20, 0x42, 0xFC,", "# 0x61 'a' [ 862, 9, 12, 9, 0, -11 ], # 0x62", "-11 ], # 0x37 '7' [ 245, 9, 13, 9, 1, -12 ],", "790, 11, 12, 10, 0, -11 ], # 0x5A 'Z' [ 807, 7,", "'!' [ 6, 5, 4, 6, 3, -11 ], # 0x22 '\"' [", "6, 0, -11 ], # 0x49 'I' [ 500, 9, 12, 8, 0,", "0x33, 0x00, 0x00, 0x44, 0x48, 0x01, 0x83, 0x86, 0x1C, 0x0C, 0x03, 0x80, 0x30,", "-11 ], # 0x29 ')' [ 105, 6, 8, 9, 3, -11 ],", "0x0C, 0x01, 0x80, 0x30, 0x04, 0x01, 0x80, 0x30, 0x04, 0x0D, 0x83, 0x7F, 0xE0,", "0x81, 0x8C, 0x0C, 0x40, 0x66, 0x07, 0x30, 0x31, 0x03, 0x18, 0x71, 0xFE, 0x00,", "0x93, 0x08, 0x08, 0x04, 0x02, 0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10, 0x08, 0x00,", "0xC6, 0x46, 0x63, 0x21, 0x11, 0xB8, 0xE0, 0x30, 0x00, 0xE2, 0x44, 0xC8, 0xCE,", "0x27, 0x31, 0x39, 0x91, 0xCC, 0x93, 0x3B, 0x0E, 0x00, 0x1F, 0x80, 0x01, 0x00,", "1040, 9, 12, 9, 0, -7 ], # 0x71 'q' [ 1054, 7,", "0x23, 0xFC, 0x24, 0x11, 0x04, 0x83, 0x20, 0x1C, 0x1B, 0x99, 0x4D, 0x26, 0x81,", "0, -11 ], # 0x4C 'L' [ 551, 16, 12, 15, 0, -11", "[ 650, 11, 12, 11, 0, -11 ], # 0x52 'R' [ 667,", "0x66 'f' [ 929, 9, 12, 8, 0, -7 ], # 0x67 'g'", "0x8F, 0x00, 0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0, 0xC4, 0x78, 0x01, 0x80, 0x40,", "0x86, 0x31, 0x87, 0xE1, 0x80, 0x30, 0x04, 0x01, 0x80, 0x78, 0x00, 0x07, 0x83,", "0x82, 0x41, 0x31, 0x0F, 0x00, 0x38, 0x08, 0x04, 0x02, 0x03, 0x39, 0x6C, 0xC6,", "0, -11 ], # 0x29 ')' [ 105, 6, 8, 9, 3, -11", "0x8C, 0x31, 0x86, 0x31, 0x87, 0xE1, 0x80, 0x30, 0x04, 0x01, 0x80, 0x78, 0x00,", "0xD8, 0x31, 0x04, 0x13, 0x01, 0x80, 0x70, 0xB7, 0xE0, 0x3F, 0xC1, 0x8C, 0x21,", "8, 0, -7 ], # 0x67 'g' [ 943, 9, 12, 9, 0,", "0x30, 0xC3, 0x0C, 0x20, 0x40, 0x80, 0x06, 0x00, 0x0F, 0xC0, 0xC3, 0x0C, 0x04,", "0x08, 0x04, 0x02, 0x03, 0x39, 0x6C, 0xC6, 0x46, 0x63, 0x21, 0x11, 0xB8, 0xE0,", "5, 0, -1 ], # 0x2C ',' [ 123, 4, 1, 6, 1,", "[ 6, 5, 4, 6, 3, -11 ], # 0x22 '\"' [ 9,", "0x3E, 0x78, 0x61, 0x82, 0x10, 0x31, 0x01, 0xB0, 0x0E, 0x00, 0x58, 0x06, 0x60,", "1074, 8, 8, 9, 1, -7 ], # 0x75 'u' [ 1082, 7,", "0x76, 0x50, 0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3,", "[ 1061, 7, 8, 6, 0, -7 ], # 0x73 's' [ 1068,", "0x18, 0x30, 0x7F, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C,", "[ 188, 9, 12, 9, 0, -11 ], # 0x34 '4' [ 202,", "4, 1, -7 ], # 0x3A ':' [ 279, 4, 10, 4, 1,", "-11 ], # 0x49 'I' [ 500, 9, 12, 8, 0, -11 ],", "0x31, 0x84, 0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0, 0x38, 0x07,", "12, 12, 1, -11 ], # 0x4F 'O' [ 612, 11, 12, 10,", "0x26, 0x98, 0x27, 0x18, 0x27, 0x10, 0x42, 0x30, 0xF4, 0x7C, 0x38, 0x78, 0x60,", "-11 ], # 0x55 'U' [ 717, 11, 12, 12, 2, -11 ],", "0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C, 0x23, 0xC8,", "8, 9, 1, -7 ], # 0x75 'u' [ 1082, 7, 8, 8,", "0x39 '9' [ 275, 4, 8, 4, 1, -7 ], # 0x3A ':'", "5, 12, 2, -6 ], # 0x3D '=' [ 301, 9, 9, 10,", "0x81, 0x18, 0x43, 0x08, 0x62, 0x0C, 0x81, 0x90, 0x14, 0x03, 0x00, 0x60, 0x08,", "11, 12, 11, 0, -11 ], # 0x52 'R' [ 667, 10, 12,", "0x0F, 0x00, 0x38, 0x08, 0x04, 0x02, 0x03, 0x39, 0x6C, 0xC6, 0x46, 0x63, 0x21,", "0xE3, 0x03, 0xC0, 0xF8, 0xEC, 0x0C, 0x81, 0x18, 0x43, 0x08, 0x62, 0x0C, 0x81,", "-11 ], # 0x28 '(' [ 93, 6, 15, 6, 0, -11 ],", "9, 0, -12 ], # 0x39 '9' [ 275, 4, 8, 4, 1,", "8, -1, -7 ], # 0x78 'x' [ 1109, 9, 12, 9, 0,", "[ 284, 9, 9, 10, 1, -8 ], # 0x3C '<' [ 295,", "# 0x57 'W' [ 757, 
12, 12, 12, 0, -11 ], # 0x58", "0x41, 0x04, 0x10, 0xC2, 0x08, 0x20, 0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24, 0x46,", "0xC0, 0xC0, 0xC0, 0xC4, 0x78, 0x01, 0x80, 0x40, 0x60, 0x20, 0xF1, 0x89, 0x8C,", "0x00, 0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0, 0x63,", "], # 0x79 'y' [ 1123, 8, 9, 7, 0, -7 ], #", "0x19, 0x86, 0x7F, 0x80, 0x07, 0x91, 0x86, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00,", "0x38, 0x10, 0x10, 0x10, 0x37, 0x22, 0x24, 0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73,", "-7 ], # 0x70 'p' [ 1040, 9, 12, 9, 0, -7 ],", "0x40, 0x3E, 0x03, 0x00, 0x40, 0x08, 0x01, 0x00, 0x60, 0x0C, 0x01, 0x00, 0x20,", "12, 0, -11 ], # 0x58 'X' [ 775, 10, 12, 11, 2,", "0xCC, 0x93, 0x3B, 0x0E, 0x00, 0x1F, 0x80, 0x01, 0x00, 0x60, 0x14, 0x04, 0xC0,", "[ 93, 6, 15, 6, 0, -11 ], # 0x29 ')' [ 105,", "0xB1, 0x88, 0xC3, 0xC0, 0x7F, 0x40, 0x80, 0x80, 0x40, 0x40, 0x60, 0x20, 0x20,", "0x19, 0x8C, 0x6C, 0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03, 0x06, 0x0C,", "0x07, 0x00, 0x60, 0x06, 0x00, 0xB0, 0x13, 0x02, 0x18, 0x61, 0x8F, 0x3E, 0xF9,", "8, 0, -11 ], # 0x4A 'J' [ 514, 13, 12, 12, 0,", "'Z' [ 807, 7, 15, 7, 0, -11 ], # 0x5B '[' [", "-7 ], # 0x78 'x' [ 1109, 9, 12, 9, 0, -7 ],", "0xB0, 0x0E, 0x00, 0x58, 0x06, 0x60, 0x33, 0x01, 0x0C, 0x18, 0x61, 0xE7, 0xC0,", "0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03, 0x06, 0x0C, 0x00, 0x33, 0x00, 0x00, 0xCC,", "24, 9, 15, 9, 1, -12 ], # 0x24 '$' [ 41, 14,", "0xC0, 0x60, 0xC1, 0xE0, 0x38, 0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F, 0x00, 0x01,", "0x8E, 0x31, 0x04, 0x01, 0x80, 0x30, 0x06, 0x00, 0x80, 0x30, 0x06, 0x00, 0x80,", "# 0x73 's' [ 1068, 5, 9, 4, 0, -8 ], # 0x74", "0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x00, 0x60, 0x0F, 0x80,", "0x0C, 0x00, 0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0,", "0x4C, 0x63, 0x08, 0xC0, 0x38, 0x07, 0x00, 0x60, 0x0C, 0x43, 0x10, 0xC6, 0x62,", "0x33, 0xB0, 0xE0, 0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0, 0x0E, 0x19,", "], # 0x3C '<' [ 295, 9, 5, 12, 2, -6 ], #", "0x48, 0x4D, 0xC6, 0x73, 0x32, 0x26, 0x64, 0x4C, 0xDE, 0x77, 0x39, 0x5E, 0xCC,", "# 0x35 '5' [ 216, 9, 13, 9, 1, -12 ], # 0x36", "-11 ], # 0x4D 'M' [ 575, 13, 12, 12, 0, -11 ],", "0x80, 0x04, 0x70, 0xC3, 0x08, 0x21, 0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67,", "0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0, 0x38, 0x07, 0x00, 0x60,", "0x00, 0x40, 0x3E, 0x03, 0x00, 0x40, 0x08, 0x01, 0x00, 0x60, 0x0C, 0x01, 0x00,", "-11 ], # 0x7B '[' [ 1144, 1, 12, 5, 2, -11 ],", "-11 ], # 0x23 '#' [ 24, 9, 15, 9, 1, -12 ],", "0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3F, 0x04, 0xC1, 0x98, 0x31, 0x84, 0x31, 0x86,", "0x21, 0xE1, 0x00, 0x07, 0x83, 0x18, 0xC1, 0xB0, 0x36, 0x07, 0xC0, 0xF0, 0x3E,", "4, 6, 3, -11 ], # 0x22 '\"' [ 9, 10, 12, 9,", "0x02, 0x04, 0x70, 0x38, 0x10, 0x10, 0x10, 0x37, 0x22, 0x24, 0x38, 0x78, 0x48,", "0x0E, 0x0C, 0x14, 0x14, 0x1C, 0x14, 0x2C, 0x16, 0x4C, 0x26, 0x48, 0x26, 0x98,", "6, 1, -11 ], # 0x28 '(' [ 93, 6, 15, 6, 0,", "], # 0x23 '#' [ 24, 9, 15, 9, 1, -12 ], #", "0x84, 0x3C, 0x03, 0x80, 0x6C, 0x06, 0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66, 0x24,", "8, 9, 7, 0, -7 ], # 0x7A 'z' [ 1132, 6, 15,", "0x04, 0x30, 0x86, 0x08, 0x61, 0x04, 0x30, 0xC3, 0x8F, 0x00, 0xFF, 0xF0, 0x1E,", "0, -1 ], # 0x2C ',' [ 123, 4, 1, 6, 1, -3", "0x06, 0x0C, 0x3C, 0x78, 0x1E, 0x18, 0x20, 0xC1, 0x83, 0x04, 0x18, 0x30, 0x41,", "0x14, 0x03, 0x00, 0x60, 0x08, 0x00, 0xFB, 0xCE, 0x43, 0x0C, 0x86, 0x11, 0x8C,", "862, 9, 12, 9, 0, -11 ], # 0x62 'b' [ 876, 8,", "284, 9, 9, 10, 1, -8 ], # 0x3C '<' [ 295, 9,", "12, 6, 1, -11 ], # 0x21 '!' 
[ 6, 5, 4, 6,", "0x80, 0x7E, 0x00, 0x7C, 0xF3, 0x02, 0x30, 0x46, 0x04, 0x60, 0x46, 0x04, 0x40,", "0x07, 0x00, 0x80, 0xFF, 0x80, 0x00, 0x00, 0x0F, 0xF8, 0xC0, 0x1C, 0x03, 0x80,", "0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82, 0x04, 0x1C, 0x30, 0x40, 0x83,", "0x18, 0x00, 0x3F, 0x42, 0x04, 0x08, 0x10, 0x20, 0x40, 0x72, 0x0E, 0x08, 0x61,", "0x30, 0x06, 0x00, 0x80, 0x7E, 0x00, 0x7C, 0xF3, 0x02, 0x30, 0x46, 0x04, 0x60,", "'A' [ 359, 11, 12, 11, 0, -11 ], # 0x42 'B' [", "0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F, 0x80, 0x3F, 0xC1, 0x8C, 0x21, 0x8C,", "1, -8 ], # 0x2B '+' [ 122, 2, 4, 5, 0, -1", "0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x00, 0x60, 0x0F, 0x80, 0x07,", "15, 7, 1, -11 ], # 0x7B '[' [ 1144, 1, 12, 5,", "12, 9, 0, -7 ], # 0x71 'q' [ 1054, 7, 8, 7,", "6, 0, -11 ], # 0x29 ')' [ 105, 6, 8, 9, 3,", "0x11, 0x18, 0x88, 0xFF, 0x02, 0x03, 0x01, 0x00, 0x0F, 0x84, 0x04, 0x03, 0x80,", "], # 0x56 'V' [ 734, 15, 12, 16, 2, -11 ], #", "], # 0x43 'C' [ 394, 13, 12, 13, 0, -11 ], #", "0x7C, 0xF3, 0x02, 0x30, 0x46, 0x04, 0x60, 0x46, 0x04, 0x40, 0x8C, 0x08, 0xC0,", "0x30, 0x86, 0x08, 0x61, 0x04, 0x30, 0xC3, 0x8F, 0x00, 0xFF, 0xF0, 0x1E, 0x0C,", "0x8C, 0x9D, 0xEE, 0x62, 0xC4, 0x89, 0xA3, 0x47, 0x0C, 0x10, 0xE2, 0x2C, 0x44,", "0xBC, 0x82, 0x41, 0x31, 0x0F, 0x00, 0x38, 0x08, 0x04, 0x02, 0x03, 0x39, 0x6C,", "12, 1, -11 ], # 0x51 'Q' [ 650, 11, 12, 11, 0,", "0x11, 0x03, 0x10, 0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8, 0x3F, 0xF0, 0xC2, 0x08,", "1, -12 ], # 0x31 '1' [ 162, 8, 12, 9, 1, -11", "], # 0x3E '>' [ 312, 7, 12, 8, 2, -11 ], #", "0x3A, 0xEE, 0x38, 0x08, 0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21,", "0x11, 0x04, 0x83, 0x20, 0x1C, 0x1B, 0x99, 0x4D, 0x26, 0x81, 0xC0, 0x70, 0x1C,", "-11 ], # 0x60 '`' [ 853, 9, 8, 9, 0, -7 ],", "9, 8, 9, 0, -7 ], # 0x6F 'o' [ 1025, 10, 12,", "# 0x31 '1' [ 162, 8, 12, 9, 1, -11 ], # 0x32", "8, 9, 0, -7 ], # 0x6E 'n' [ 1016, 9, 8, 9,", "9, 2, -11 ], # 0x5C '\\' [ 830, 6, 15, 7, 1,", "7, 0, -11 ], # 0x5B '[' [ 821, 6, 12, 9, 2,", "4, 10, 4, 1, -7 ], # 0x3B '' [ 284, 9, 9,", "0, -11 ], # 0x6B 'k' [ 989, 4, 12, 5, 1, -11", "0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8,", "0x14, 0x2C, 0x16, 0x4C, 0x26, 0x48, 0x26, 0x98, 0x27, 0x18, 0x27, 0x10, 0x42,", "0xC0, 0x00, 0x1E, 0x19, 0xD8, 0xCC, 0xE1, 0xC3, 0x01, 0xE0, 0xBC, 0x82, 0x41,", "0xE5, 0x40, 0x04, 0x82, 0x20, 0x98, 0x24, 0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24,", "0xC3, 0x0C, 0x61, 0x84, 0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E, 0x6A, 0x93, 0x08,", "0xFF, 0x80, 0xD8, 0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE,", "0x98, 0x4C, 0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0, 0x76,", "[ 0, 0, 0, 5, 0, 1 ], # 0x20 ' ' [", "0x18, 0x27, 0x10, 0x42, 0x30, 0xF4, 0x7C, 0x38, 0x78, 0x60, 0x83, 0x04, 0x2C,", "], # 0x57 'W' [ 757, 12, 12, 12, 0, -11 ], #", "[ 612, 11, 12, 10, 0, -11 ], # 0x50 'P' [ 629,", "# 0x6A 'j' [ 977, 8, 12, 8, 0, -11 ], # 0x6B", "0x00, 0x01, 0x01, 0x81, 0x41, 0x61, 0x21, 0x11, 0x18, 0x88, 0xFF, 0x02, 0x03,", "0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0, 0x50, 0xCC, 0xC3,", "0x20, 0x88, 0x1B, 0x8F, 0x80, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E,", "0x80, 0x40, 0x60, 0x20, 0xF1, 0x89, 0x8C, 0xC4, 0xC2, 0x63, 0x33, 0xAE, 0xE0,", "0, -11 ], # 0x45 'E' [ 432, 12, 12, 10, 0, -11", "0x04, 0x08, 0x20, 0x41, 0x38, 0x20, 0x82, 0x08, 0x41, 0x04, 0x10, 0xC2, 0x08,", "0x26, 0x64, 0x4C, 0xDE, 0x77, 0x39, 0x5E, 0xCC, 0xCC, 0xCE, 0x66, 0x62, 0x22,", "0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0, 0x83, 0x04, 0x08, 0x10, 0x60, 0x81,", "1016, 9, 8, 9, 0, -7 
], # 0x6F 'o' [ 1025, 10,", "0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08, 0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6,", "0x64, 0xE2, 0x27, 0x31, 0x39, 0x91, 0xCC, 0x93, 0x3B, 0x0E, 0x00, 0x1F, 0x80,", "0xFF, 0x80, 0x00, 0x00, 0x0F, 0xF8, 0xC0, 0x1C, 0x03, 0x80, 0x70, 0x18, 0x38,", "0x2C, 0x16, 0x4C, 0x26, 0x48, 0x26, 0x98, 0x27, 0x18, 0x27, 0x10, 0x42, 0x30,", "0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4, 0x19, 0x86, 0x7F,", "0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C, 0x78, 0x1E, 0x18, 0x20, 0xC1, 0x83, 0x04,", "13, 12, 12, 0, -11 ], # 0x4E 'N' [ 595, 11, 12,", "0x02, 0x04, 0x10, 0x20, 0x40, 0x82, 0x04, 0x08, 0x1C, 0x00, 0x81, 0x04, 0x18,", "'\\' [ 830, 6, 15, 7, 1, -11 ], # 0x5D ']' [", "0x00, 0xE2, 0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0, 0x83, 0x04, 0x08,", "'g' [ 943, 9, 12, 9, 0, -11 ], # 0x68 'h' [", "11, 12, 10, 0, -11 ], # 0x5A 'Z' [ 807, 7, 15,", "0x03, 0x80, 0x6C, 0x06, 0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66, 0x24, 0x24, 0xC3,", "5, 0, -11 ], # 0x2F '/' [ 137, 9, 13, 9, 1,", "# 0x2A '#' [ 111, 9, 9, 12, 1, -8 ], # 0x2B", "0x22, 0x11, 0x11, 0xB9, 0x8E, 0x77, 0x3B, 0x33, 0x62, 0x62, 0x42, 0x4D, 0xCE,", "595, 11, 12, 12, 1, -11 ], # 0x4F 'O' [ 612, 11,", "0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0, 0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3,", "[ 359, 11, 12, 11, 0, -11 ], # 0x42 'B' [ 376,", "0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0, 0x38, 0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F,", "7, 16, 7, 0, -12 ], # 0x7D ']' [ 1160, 8, 3,", "0xC8, 0xC0, 0xD1, 0xB3, 0x5C, 0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42,", "0x04, 0x00, 0x3E, 0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60, 0x06, 0x00,", "], # 0x70 'p' [ 1040, 9, 12, 9, 0, -7 ], #", "0, -7 ], # 0x73 's' [ 1068, 5, 9, 4, 0, -8", "-12 ], # 0x24 '$' [ 41, 14, 12, 15, 1, -11 ],", "0x02, 0x18, 0x61, 0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8, 0x34, 0x05, 0x01,", "-1 ], # 0x2E '.' 
[ 125, 8, 12, 5, 0, -11 ],", "500, 9, 12, 8, 0, -11 ], # 0x4A 'J' [ 514, 13,", "0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0, 0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06,", "9, 0, -7 ], # 0x79 'y' [ 1123, 8, 9, 7, 0,", "'T' [ 699, 12, 12, 13, 2, -11 ], # 0x55 'U' [", "0x0C, 0x08, 0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0, 0x38,", "0x40, 0x08, 0x01, 0x00, 0x60, 0x0C, 0x01, 0x00, 0x20, 0x04, 0x01, 0x00, 0xC0,", "0x4D, 0xC6, 0x73, 0x32, 0x26, 0x64, 0x4C, 0xDE, 0x77, 0x39, 0x5E, 0xCC, 0xCC,", "# 0x60 '`' [ 853, 9, 8, 9, 0, -7 ], # 0x61", "0x28, 0x30, 0xA0, 0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26,", "0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x00, 0x60, 0x0F,", "295, 9, 5, 12, 2, -6 ], # 0x3D '=' [ 301, 9,", "[ 202, 9, 12, 9, 0, -11 ], # 0x35 '5' [ 216,", "0x3F, 0x42, 0x04, 0x08, 0x10, 0x20, 0x40, 0x72, 0x0E, 0x08, 0x61, 0x04, 0x30,", "111, 9, 9, 12, 1, -8 ], # 0x2B '+' [ 122, 2,", "0x21, 0x11, 0xB8, 0xE0, 0x30, 0x00, 0xE2, 0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00,", "0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0, 0x0E, 0x19, 0x8C, 0x6C, 0x36, 0x1B, 0x0D,", "0x04, 0x70, 0xC3, 0x08, 0x21, 0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83,", "0x01, 0x01, 0x81, 0x41, 0x61, 0x21, 0x11, 0x18, 0x88, 0xFF, 0x02, 0x03, 0x01,", "0xB0, 0xE0, 0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0, 0x0E, 0x19, 0x8C,", "], # 0x71 'q' [ 1054, 7, 8, 7, 0, -7 ], #", "0x42, 0x30, 0xF4, 0x7C, 0x38, 0x78, 0x60, 0x83, 0x04, 0x2C, 0x41, 0x22, 0x09,", "], # 0x54 'T' [ 699, 12, 12, 13, 2, -11 ], #", "], # 0x5D ']' [ 842, 8, 7, 8, 0, -11 ], #", "0x35 '5' [ 216, 9, 13, 9, 1, -12 ], # 0x36 '6'", "0x88, 0x83, 0x80, 0x04, 0x70, 0xC3, 0x08, 0x21, 0x86, 0x10, 0x43, 0x08, 0xF8,", "0x77, 0x39, 0x5E, 0xCC, 0xCC, 0xCE, 0x66, 0x62, 0x22, 0x11, 0x11, 0xB9, 0x8E,", "0x42, 0xC6, 0x86, 0x8C, 0x9D, 0xEE, 0x62, 0xC4, 0x89, 0xA3, 0x47, 0x0C, 0x10,", "0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C, 0x1E, 0x00, 0x3F, 0xC1,", "0x4C, 0xDE, 0x77, 0x39, 0x5E, 0xCC, 0xCC, 0xCE, 0x66, 0x62, 0x22, 0x11, 0x11,", "0, -11 ], # 0x4B 'K' [ 534, 11, 12, 10, 0, -11", "0x20, 0x0C, 0x01, 0x80, 0x30, 0x04, 0x01, 0x80, 0x30, 0x04, 0x0D, 0x83, 0x7F,", "9, 12, 9, 0, -11 ], # 0x35 '5' [ 216, 9, 13,", "3, 5, 2, -11 ], # 0x60 '`' [ 853, 9, 8, 9,", "0, -10 ], # 0x41 'A' [ 359, 11, 12, 11, 0, -11", "0x06, 0x01, 0xEF, 0x66, 0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3, 0x87, 0xCE, 0xFA,", "0, -11 ], # 0x4D 'M' [ 575, 13, 12, 12, 0, -11", "[ 876, 8, 8, 7, 0, -7 ], # 0x63 'c' [ 884,", "162, 8, 12, 9, 1, -11 ], # 0x32 '2' [ 174, 9,", "0xC3, 0xC0, 0x7F, 0x40, 0x80, 0x80, 0x40, 0x40, 0x60, 0x20, 0x20, 0x10, 0x10,", "0xC3, 0x61, 0xB1, 0x88, 0xC3, 0xC0, 0x7F, 0x40, 0x80, 0x80, 0x40, 0x40, 0x60,", "[ 122, 2, 4, 5, 0, -1 ], # 0x2C ',' [ 123,", "0, 0, 5, 0, 1 ], # 0x20 ' ' [ 0, 4,", "0x03, 0x39, 0x6C, 0xC6, 0x46, 0x63, 0x21, 0x11, 0xB8, 0xE0, 0x30, 0x00, 0xE2,", "0xC0, 0x1C, 0x03, 0x80, 0x70, 0x18, 0x38, 0x70, 0xC0, 0x80, 0x00, 0x3C, 0x8C,", "0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61, 0xB1, 0x88, 0xC3, 0xC0, 0x7F, 0x40,", "0x46, 0x63, 0x21, 0x11, 0xB8, 0xE0, 0x30, 0x00, 0xE2, 0x44, 0xC8, 0xCE, 0x06,", "876, 8, 8, 7, 0, -7 ], # 0x63 'c' [ 884, 9,", "0xC4, 0x78, 0x01, 0x80, 0x40, 0x60, 0x20, 0xF1, 0x89, 0x8C, 0xC4, 0xC2, 0x63,", "0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00, 0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0, 0xC4,", "# 0x33 '3' [ 188, 9, 12, 9, 0, -11 ], # 0x34", "# 0x52 'R' [ 667, 10, 12, 8, 0, -11 ], # 0x53", "0x10, 0x04, 0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C, 0x2C, 0x26, 0x33, 0x38, 0xEC,", "0, -12 ], # 0x7D ']' [ 1160, 8, 3, 10, 1, -5", "= [ 0x11, 0x12, 0x22, 0x24, 0x40, 
0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82,", "0x3C '<' [ 295, 9, 5, 12, 2, -6 ], # 0x3D '='", "0x39, 0x6C, 0xC6, 0x46, 0x63, 0x21, 0x11, 0xB8, 0xE0, 0x30, 0x00, 0xE2, 0x44,", "9, 12, 1, -8 ], # 0x2B '+' [ 122, 2, 4, 5,", "'N' [ 595, 11, 12, 12, 1, -11 ], # 0x4F 'O' [", "0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61,", "0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0, 0x38, 0x07, 0x00, 0x60, 0x0C, 0x43, 0x10,", "-11 ], # 0x48 'H' [ 489, 7, 12, 6, 0, -11 ],", "0x0E, 0x28, 0x30, 0xA0, 0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84,", "0x48, 0x01, 0x83, 0x86, 0x1C, 0x0C, 0x03, 0x80, 0x30, 0x07, 0x00, 0x80, 0xFF,", "7, 0, -7 ], # 0x7A 'z' [ 1132, 6, 15, 7, 1,", "0x08, 0x20, 0x60, 0x99, 0x8E ] FreeSerifItalic9pt7bGlyphs = [ [ 0, 0, 0,", "0x10, 0x10, 0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0, 0x50,", "0x30, 0xC4, 0x19, 0x86, 0x7F, 0x80, 0x07, 0x91, 0x86, 0x30, 0x26, 0x02, 0x60,", "12, 8, 2, -11 ], # 0x3F '?' [ 323, 13, 12, 14,", "0x80, 0x00, 0x00, 0x0F, 0xF8, 0xC0, 0x1C, 0x03, 0x80, 0x70, 0x18, 0x38, 0x70,", "'<' [ 295, 9, 5, 12, 2, -6 ], # 0x3D '=' [", "0x1C, 0x00, 0x81, 0x04, 0x18, 0x20, 0xC1, 0x04, 0x08, 0x20, 0x41, 0x38, 0x20,", "0x6B 'k' [ 989, 4, 12, 5, 1, -11 ], # 0x6C 'l'", "0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C, 0x9D, 0xEE,", "0x54 'T' [ 699, 12, 12, 13, 2, -11 ], # 0x55 'U'", "0x0F, 0x81, 0x80, 0x80, 0xC0, 0x60, 0x20, 0x30, 0x18, 0x0C, 0x04, 0x36, 0x1E,", "# 0x40 '@' [ 343, 11, 11, 12, 0, -10 ], # 0x41", "0x06, 0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66, 0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3,", "0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3, 0x87, 0xCE, 0xFA, 0x08, 0x21, 0x08, 0x61,", "[ 534, 11, 12, 10, 0, -11 ], # 0x4C 'L' [ 551,", "8, 9, 0, -7 ], # 0x61 'a' [ 862, 9, 12, 9,", "0x2A '#' [ 111, 9, 9, 12, 1, -8 ], # 0x2B '+'", "0x80, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30,", "[ 162, 8, 12, 9, 1, -11 ], # 0x32 '2' [ 174,", "'G' [ 468, 14, 12, 13, 0, -11 ], # 0x48 'H' [", "], # 0x51 'Q' [ 650, 11, 12, 11, 0, -11 ], #", "-6 ], # 0x3D '=' [ 301, 9, 9, 10, 1, -8 ],", "0x30, 0x02, 0x00, 0x60, 0x0F, 0x80, 0x07, 0x91, 0x87, 0x30, 0x26, 0x02, 0x60,", "0, -11 ], # 0x23 '#' [ 24, 9, 15, 9, 1, -12", "0x1E, 0x00, 0x3F, 0xC1, 0x9C, 0x21, 0x8C, 0x31, 0x86, 0x31, 0x87, 0xE1, 0x80,", "0x00, 0x40, 0x18, 0x06, 0x05, 0x81, 0x7F, 0xE0, 0x0E, 0x10, 0x20, 0x81, 0x02,", "0x79 'y' [ 1123, 8, 9, 7, 0, -7 ], # 0x7A 'z'", "0x41, 0x31, 0x0F, 0x00, 0x38, 0x08, 0x04, 0x02, 0x03, 0x39, 0x6C, 0xC6, 0x46,", "0xCC, 0xCC, 0xCE, 0x66, 0x62, 0x22, 0x11, 0x11, 0xB9, 0x8E, 0x77, 0x3B, 0x33,", "[ 699, 12, 12, 13, 2, -11 ], # 0x55 'U' [ 717,", "1054, 7, 8, 7, 0, -7 ], # 0x72 'r' [ 1061, 7,", "# 0x34 '4' [ 202, 9, 12, 9, 0, -11 ], # 0x35", "], # 0x24 '$' [ 41, 14, 12, 15, 1, -11 ], #", "0x7F, 0x40, 0x80, 0x80, 0x40, 0x40, 0x60, 0x20, 0x20, 0x10, 0x10, 0x18, 0x08,", "0x63, 0x33, 0xAE, 0xE0, 0x0E, 0x65, 0x8B, 0x2F, 0x98, 0x31, 0x3C, 0x01, 0xE0,", "-7 ], # 0x61 'a' [ 862, 9, 12, 9, 0, -11 ],", "0x18, 0x43, 0x08, 0x62, 0x0C, 0x81, 0x90, 0x14, 0x03, 0x00, 0x60, 0x08, 0x00,", "0x53 'S' [ 682, 11, 12, 11, 2, -11 ], # 0x54 'T'", "0x20, 0x20, 0x10, 0x10, 0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0,", "# 0x30 '0' [ 152, 6, 13, 9, 1, -12 ], # 0x31", "0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0, 0x50, 0xCC, 0xC3, 0x61, 0xB0,", "0x80, 0x78, 0x00, 0x07, 0x83, 0x18, 0xC1, 0x98, 0x36, 0x07, 0xC0, 0xF0, 0x1E,", "[ 260, 9, 13, 9, 0, -12 ], # 0x39 '9' [ 275,", "0x80, 0x07, 0x91, 0x87, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC1, 0xFC, 0x0C,", "0x22 '\"' [ 9, 10, 12, 9, 
0, -11 ], # 0x23 '#'", "807, 7, 15, 7, 0, -11 ], # 0x5B '[' [ 821, 6,", "# 0x66 'f' [ 929, 9, 12, 8, 0, -7 ], # 0x67", "0x04, 0x08, 0x10, 0x20, 0x40, 0x72, 0x0E, 0x08, 0x61, 0x04, 0x30, 0x86, 0x08,", "0x20, 0xC1, 0x83, 0x04, 0x18, 0x30, 0x41, 0x87, 0x80, 0x0F, 0x81, 0x80, 0x80,", "'a' [ 862, 9, 12, 9, 0, -11 ], # 0x62 'b' [", "0x43, 0x38, 0x86, 0xB2, 0x0D, 0x24, 0x1C, 0x50, 0x38, 0xA0, 0x21, 0x80, 0x42,", "0, -7 ], # 0x79 'y' [ 1123, 8, 9, 7, 0, -7", "# 0x54 'T' [ 699, 12, 12, 13, 2, -11 ], # 0x55", "821, 6, 12, 9, 2, -11 ], # 0x5C '\\' [ 830, 6,", "0x80, 0xC0, 0x60, 0x20, 0x30, 0x18, 0x0C, 0x04, 0x36, 0x1E, 0x00, 0x3E, 0x78,", "-11 ], # 0x5B '[' [ 821, 6, 12, 9, 2, -11 ],", "12, 12, 0, -11 ], # 0x4B 'K' [ 534, 11, 12, 10,", "0x10, 0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0, 0x50, 0xCC,", "[ 124, 2, 2, 5, 0, -1 ], # 0x2E '.' [ 125,", "0x67 'g' [ 943, 9, 12, 9, 0, -11 ], # 0x68 'h'", "0x03, 0x03, 0x06, 0x0C, 0x00, 0x33, 0x00, 0x00, 0xCC, 0x33, 0x00, 0x00, 0x44,", "# 0x24 '$' [ 41, 14, 12, 15, 1, -11 ], # 0x25", "-5 ] ] # 0x7E '~' FreeSerifItalic9pt7b = [ FreeSerifItalic9pt7bBitmaps, FreeSerifItalic9pt7bGlyphs, 0x20, 0x7E,", "0x60, 0x40, 0xC0, 0x0E, 0x0C, 0x8C, 0x6C, 0x36, 0x1F, 0x0F, 0x07, 0x87, 0xC3,", "0, -1 ], # 0x2E '.' [ 125, 8, 12, 5, 0, -11", "# 0x2D '-' [ 124, 2, 2, 5, 0, -1 ], # 0x2E", "1144, 1, 12, 5, 2, -11 ], # 0x7C '|' [ 1146, 7,", "[ 245, 9, 13, 9, 1, -12 ], # 0x38 '8' [ 260,", "0x60, 0x83, 0x04, 0x2C, 0x41, 0x22, 0x09, 0x10, 0x4D, 0x84, 0x28, 0x21, 0x41,", "0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08, 0x04, 0x02, 0x03, 0x71, 0xCC,", "8, 4, 1, -7 ], # 0x3A ':' [ 279, 4, 10, 4,", "[ 105, 6, 8, 9, 3, -11 ], # 0x2A '#' [ 111,", "0x81, 0x7F, 0xE0, 0x0E, 0x10, 0x20, 0x81, 0x02, 0x04, 0x10, 0x20, 0x40, 0x82,", "0x20, 0x10, 0x10, 0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0,", "0x4C, 0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0, 0x76, 0x50,", "5, -1, -11 ], # 0x6A 'j' [ 977, 8, 12, 8, 0,", "0x01, 0x80, 0x78, 0x00, 0x07, 0x83, 0x18, 0xC1, 0x98, 0x36, 0x07, 0xC0, 0xF0,", "[ 81, 6, 15, 6, 1, -11 ], # 0x28 '(' [ 93,", "'0' [ 152, 6, 13, 9, 1, -12 ], # 0x31 '1' [", "0x3F, 0x04, 0xC1, 0x98, 0x31, 0x84, 0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63,", "'F' [ 450, 12, 12, 12, 1, -11 ], # 0x47 'G' [", "10, 12, 8, 0, -11 ], # 0x53 'S' [ 682, 11, 12,", "0x18, 0xC1, 0x98, 0x36, 0x07, 0xC0, 0xF0, 0x1E, 0x06, 0xC0, 0xD8, 0x31, 0x04,", "0x3E, 0x01, 0x80, 0x20, 0x0C, 0x01, 0x80, 0x30, 0x04, 0x01, 0x80, 0x30, 0x04,", "0x10, 0x20, 0x81, 0x02, 0x04, 0x10, 0x20, 0x40, 0x82, 0x04, 0x08, 0x1C, 0x00,", "12, 11, 1, -11 ], # 0x43 'C' [ 394, 13, 12, 13,", "0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C, 0x23, 0xC8, 0xC4,", "0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26, 0x20, 0x99, 0x84, 0x3C, 0x03, 0x80,", "0x33, 0x01, 0x0C, 0x18, 0x61, 0xE7, 0xC0, 0x3E, 0x01, 0x80, 0x20, 0x0C, 0x01,", "0x03, 0xE0, 0x3F, 0xE4, 0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40,", "0x18, 0x06, 0x05, 0x81, 0x7F, 0xE0, 0x0E, 0x10, 0x20, 0x81, 0x02, 0x04, 0x10,", "0x86, 0xB2, 0x0D, 0x24, 0x1C, 0x50, 0x38, 0xA0, 0x21, 0x80, 0x42, 0x01, 0x04,", "15, 6, 1, -11 ], # 0x28 '(' [ 93, 6, 15, 6,", "0x02, 0x00, 0x10, 0x40, 0x82, 0x0C, 0x30, 0xC3, 0x0C, 0x61, 0x84, 0x21, 0x08,", "# 0x78 'x' [ 1109, 9, 12, 9, 0, -7 ], # 0x79", "'z' [ 1132, 6, 15, 7, 1, -11 ], # 0x7B '[' [", "-11 ], # 0x6A 'j' [ 977, 8, 12, 8, 0, -11 ],", "[ 0, 4, 12, 6, 1, -11 ], # 0x21 '!' [ 6,", "0x80, 0x30, 0x04, 0x01, 0x80, 0x78, 0x00, 0x07, 0x83, 0x18, 0xC1, 0x98, 0x36,", "0x21 '!' 
[ 6, 5, 4, 6, 3, -11 ], # 0x22 '\"'", "0x32 '2' [ 174, 9, 12, 9, 0, -11 ], # 0x33 '3'", "[ 125, 8, 12, 5, 0, -11 ], # 0x2F '/' [ 137,", "-1 ], # 0x2C ',' [ 123, 4, 1, 6, 1, -3 ],", "0x40, 0x60, 0x20, 0xF1, 0x89, 0x8C, 0xC4, 0xC2, 0x63, 0x33, 0xAE, 0xE0, 0x0E,", "0x58 'X' [ 775, 10, 12, 11, 2, -11 ], # 0x59 'Y'", "4, 12, 4, 1, -11 ], # 0x69 'i' [ 963, 7, 16,", "0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00, 0x1F, 0x33,", "[ 1109, 9, 12, 9, 0, -7 ], # 0x79 'y' [ 1123,", "0x13, 0x49, 0xA4, 0xDA, 0xC7, 0xC1, 0x00, 0x80, 0x1C, 0x61, 0xCF, 0x0E, 0x28,", "12, 9, 0, -11 ], # 0x35 '5' [ 216, 9, 13, 9,", "# 0x41 'A' [ 359, 11, 12, 11, 0, -11 ], # 0x42", "0xA0, 0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26, 0x20, 0x99,", "-3 ], # 0x2D '-' [ 124, 2, 2, 5, 0, -1 ],", "0xCC, 0xE1, 0xC3, 0x01, 0xE0, 0xBC, 0x82, 0x41, 0x31, 0x0F, 0x00, 0x38, 0x08,", "0x5E, 0xCC, 0xCC, 0xCE, 0x66, 0x62, 0x22, 0x11, 0x11, 0xB9, 0x8E, 0x77, 0x3B,", "# 0x75 'u' [ 1082, 7, 8, 8, 1, -7 ], # 0x76", "667, 10, 12, 8, 0, -11 ], # 0x53 'S' [ 682, 11,", "0x80, 0xFF, 0x80, 0x00, 0x00, 0x0F, 0xF8, 0xC0, 0x1C, 0x03, 0x80, 0x70, 0x18,", "'?' [ 323, 13, 12, 14, 1, -11 ], # 0x40 '@' [", "[ 734, 15, 12, 16, 2, -11 ], # 0x57 'W' [ 757,", "0x0C, 0x10, 0xE2, 0x2C, 0x44, 0xD8, 0x9D, 0x23, 0xA4, 0x65, 0x0C, 0xC1, 0x10,", "0x4D, 0x84, 0x28, 0x21, 0x41, 0x06, 0x10, 0x21, 0xE1, 0x00, 0x07, 0x83, 0x18,", "6, 15, 7, 1, -11 ], # 0x5D ']' [ 842, 8, 7,", "0x00, 0x3E, 0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60, 0x06, 0x00, 0xB0,", "0x81, 0xC0, 0x70, 0x1C, 0x13, 0x49, 0xA4, 0xDA, 0xC7, 0xC1, 0x00, 0x80, 0x1C,", "0x01, 0x0C, 0x18, 0x61, 0xE7, 0xC0, 0x3E, 0x01, 0x80, 0x20, 0x0C, 0x01, 0x80,", "0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0, 0xC4, 0x78, 0x01, 0x80, 0x40, 0x60, 0x20,", "0xF8, 0xC0, 0x1C, 0x03, 0x80, 0x70, 0x18, 0x38, 0x70, 0xC0, 0x80, 0x00, 0x3C,", "0x63, 0x21, 0x11, 0xB8, 0xE0, 0x30, 0x00, 0xE2, 0x44, 0xC8, 0xCE, 0x06, 0x00,", "[ 279, 4, 10, 4, 1, -7 ], # 0x3B '' [ 284,", "-7 ], # 0x63 'c' [ 884, 9, 12, 9, 0, -11 ],", "0xC0, 0x76, 0x50, 0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1,", "0x18, 0x98, 0x4C, 0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0,", "], # 0x76 'v' [ 1089, 11, 8, 12, 1, -7 ], #", "0x41, 0x22, 0x09, 0x10, 0x4D, 0x84, 0x28, 0x21, 0x41, 0x06, 0x10, 0x21, 0xE1,", "15, 12, 1, -11 ], # 0x51 'Q' [ 650, 11, 12, 11,", "0x36, 0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C, 0x1E, 0x00, 0x3F,", "0x03, 0x03, 0x03, 0x1E, 0x00, 0x01, 0x83, 0x87, 0x07, 0x03, 0x03, 0x73, 0xCD,", "'#' [ 24, 9, 15, 9, 1, -12 ], # 0x24 '$' [", "0x20, 0x10, 0x08, 0x00, 0x56, 0xF0, 0xF0, 0x03, 0x02, 0x06, 0x04, 0x08, 0x08,", "'^' [ 849, 9, 1, 9, 0, 2 ], # 0x5F '_' [", "13, 12, 13, 0, -11 ], # 0x44 'D' [ 414, 12, 12,", "0x84, 0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E, 0x6A, 0x93, 0x08, 0x08, 0x04, 0x02,", "9, 4, 0, -8 ], # 0x74 't' [ 1074, 8, 8, 9,", "13, 9, 0, -12 ], # 0x39 '9' [ 275, 4, 8, 4,", "414, 12, 12, 10, 0, -11 ], # 0x45 'E' [ 432, 12,", "4, 12, 6, 1, -11 ], # 0x21 '!' 
[ 6, 5, 4,", "0x80, 0x0F, 0x18, 0x98, 0x4C, 0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04, 0x02, 0x03,", "[ 376, 12, 12, 11, 1, -11 ], # 0x43 'C' [ 394,", "], # 0x7A 'z' [ 1132, 6, 15, 7, 1, -11 ], #", "0x30, 0xCA, 0x5E, 0x6A, 0x93, 0x08, 0x08, 0x04, 0x02, 0x01, 0x0F, 0xF8, 0x40,", "0x05, 0x81, 0x7F, 0xE0, 0x0E, 0x10, 0x20, 0x81, 0x02, 0x04, 0x10, 0x20, 0x40,", "0x00, 0x81, 0x04, 0x18, 0x20, 0xC1, 0x04, 0x08, 0x20, 0x41, 0x38, 0x20, 0x82,", "0x04, 0x36, 0x1E, 0x00, 0x3E, 0x78, 0x61, 0x82, 0x10, 0x31, 0x01, 0xB0, 0x0E,", "0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61, 0xB1, 0x88, 0xC3, 0xC0, 0x7F, 0x40, 0x80,", "0x36, 0x1E, 0x00, 0x3E, 0x78, 0x61, 0x82, 0x10, 0x31, 0x01, 0xB0, 0x0E, 0x00,", "'s' [ 1068, 5, 9, 4, 0, -8 ], # 0x74 't' [", "-11 ], # 0x25 '%' [ 62, 12, 12, 14, 1, -11 ],", "0x04, 0x30, 0xC3, 0x8F, 0x00, 0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82,", "1068, 5, 9, 4, 0, -8 ], # 0x74 't' [ 1074, 8,", "0x30, 0x0C, 0x03, 0x03, 0xE0, 0x3F, 0xE4, 0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C,", "[ 905, 11, 17, 8, -1, -12 ], # 0x66 'f' [ 929,", "], # 0x4A 'J' [ 514, 13, 12, 12, 0, -11 ], #", "0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0, 0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1,", "12, 12, 10, 0, -11 ], # 0x46 'F' [ 450, 12, 12,", "1, -11 ], # 0x7B '[' [ 1144, 1, 12, 5, 2, -11", "0x83, 0x87, 0x07, 0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61, 0xB1, 0x88, 0xC3,", "0xE2, 0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0, 0x83, 0x04, 0x08, 0x10,", "0x72 'r' [ 1061, 7, 8, 6, 0, -7 ], # 0x73 's'", "0x7A 'z' [ 1132, 6, 15, 7, 1, -11 ], # 0x7B '['", "0xC3, 0x0C, 0x20, 0x40, 0x80, 0x06, 0x00, 0x0F, 0xC0, 0xC3, 0x0C, 0x04, 0xC7,", "0x00, 0x3F, 0xC1, 0x9C, 0x21, 0x8C, 0x31, 0x86, 0x31, 0x87, 0xE1, 0x80, 0x30,", "0x0E, 0x08, 0x61, 0x04, 0x30, 0x86, 0x08, 0x61, 0x04, 0x30, 0xC3, 0x8F, 0x00,", "5, 0, -1 ], # 0x2E '.' 
[ 125, 8, 12, 5, 0,", "0x00, 0xCC, 0x33, 0x00, 0x00, 0x44, 0x48, 0x01, 0x83, 0x86, 0x1C, 0x0C, 0x03,", "-7 ], # 0x72 'r' [ 1061, 7, 8, 6, 0, -7 ],", "0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C, 0x23,", "137, 9, 13, 9, 1, -12 ], # 0x30 '0' [ 152, 6,", "[ 851, 3, 3, 5, 2, -11 ], # 0x60 '`' [ 853,", "0x01, 0x01, 0x1F, 0x00, 0x01, 0x01, 0x81, 0x41, 0x61, 0x21, 0x11, 0x18, 0x88,", "-7 ], # 0x79 'y' [ 1123, 8, 9, 7, 0, -7 ],", "# 0x49 'I' [ 500, 9, 12, 8, 0, -11 ], # 0x4A", "0x03, 0x00, 0x40, 0x18, 0x06, 0x05, 0x81, 0x7F, 0xE0, 0x0E, 0x10, 0x20, 0x81,", "0x1C, 0x03, 0x80, 0x70, 0x18, 0x38, 0x70, 0xC0, 0x80, 0x00, 0x3C, 0x8C, 0x18,", "0x99, 0x84, 0x3C, 0x03, 0x80, 0x6C, 0x06, 0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66,", "0x00, 0x07, 0x83, 0x18, 0xC1, 0x98, 0x36, 0x07, 0xC0, 0xF0, 0x1E, 0x06, 0xC0,", "0x46 'F' [ 450, 12, 12, 12, 1, -11 ], # 0x47 'G'", "], # 0x73 's' [ 1068, 5, 9, 4, 0, -8 ], #", "[ 667, 10, 12, 8, 0, -11 ], # 0x53 'S' [ 682,", "0x80, 0x01, 0x00, 0x60, 0x14, 0x04, 0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04, 0x20,", "1, -7 ], # 0x75 'u' [ 1082, 7, 8, 8, 1, -7", "8, 7, 0, -7 ], # 0x72 'r' [ 1061, 7, 8, 6,", "''' [ 81, 6, 15, 6, 1, -11 ], # 0x28 '(' [", "0xEF, 0x66, 0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3, 0x87, 0xCE, 0xFA, 0x08, 0x21,", "[ 394, 13, 12, 13, 0, -11 ], # 0x44 'D' [ 414,", "0xE0, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3F, 0x04, 0xC1, 0x98, 0x31,", "0x01, 0x80, 0x20, 0x0C, 0x01, 0x80, 0x30, 0x04, 0x01, 0x80, 0x30, 0x04, 0x0D,", "5, 1, -11 ], # 0x6C 'l' [ 995, 13, 8, 13, 0,", "0xC1, 0xE0, 0x38, 0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F, 0x00, 0x01, 0x01, 0x81,", "0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10,", "0x60, 0x0F, 0x80, 0x07, 0x91, 0x87, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC1,", "0x10, 0x20, 0xC1, 0x82, 0x04, 0x1C, 0x30, 0x40, 0x83, 0x04, 0x08, 0x20, 0x60,", "-11 ], # 0x58 'X' [ 775, 10, 12, 11, 2, -11 ],", "0x02, 0x03, 0x01, 0x00, 0x0F, 0x84, 0x04, 0x03, 0x80, 0x60, 0x18, 0x0C, 0x06,", "0x30, 0x04, 0x01, 0x80, 0x78, 0x00, 0x07, 0x83, 0x18, 0xC1, 0x98, 0x36, 0x07,", "-11 ], # 0x27 ''' [ 81, 6, 15, 6, 1, -11 ],", "9, 0, -11 ], # 0x35 '5' [ 216, 9, 13, 9, 1,", "], # 0x30 '0' [ 152, 6, 13, 9, 1, -12 ], #", "0x04, 0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C, 0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04,", "0x70, 0x1C, 0x13, 0x49, 0xA4, 0xDA, 0xC7, 0xC1, 0x00, 0x80, 0x1C, 0x61, 0xCF,", "'&' [ 80, 2, 4, 4, 3, -11 ], # 0x27 ''' [", "0x20, 0x41, 0x38, 0x20, 0x82, 0x08, 0x41, 0x04, 0x10, 0xC2, 0x08, 0x20, 0x8C,", "0x08, 0x10, 0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E, 0x0C, 0x8C, 0x6C, 0x36, 0x1F,", "6, 0, -7 ], # 0x73 's' [ 1068, 5, 9, 4, 0,", "0x38, 0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0, 0x76, 0x50, 0xC1, 0x06, 0x08, 0x10,", "# 0x4D 'M' [ 575, 13, 12, 12, 0, -11 ], # 0x4E", "0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C, 0x9D,", "12, 16, 2, -11 ], # 0x57 'W' [ 757, 12, 12, 12,", "13, 0, -7 ], # 0x6D 'm' [ 1008, 8, 8, 9, 0,", "9, 0, -7 ], # 0x71 'q' [ 1054, 7, 8, 7, 0,", "0x03, 0xC0, 0xF8, 0xEC, 0x0C, 0x81, 0x18, 0x43, 0x08, 0x62, 0x0C, 0x81, 0x90,", "], # 0x26 '&' [ 80, 2, 4, 4, 3, -11 ], #", "957, 4, 12, 4, 1, -11 ], # 0x69 'i' [ 963, 7,", "0x40, 0x08, 0x02, 0x00, 0x40, 0x3E, 0x03, 0x00, 0x40, 0x08, 0x01, 0x00, 0x60,", "0xC3, 0x08, 0x21, 0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02,", "0x0C, 0x0E, 0x0C, 0x14, 0x14, 0x1C, 0x14, 0x2C, 0x16, 0x4C, 0x26, 0x48, 0x26,", "8, 8, 7, 0, -7 ], # 0x63 'c' [ 884, 9, 12,", "0x40, 0x82, 0x0C, 0x30, 0xC3, 0x0C, 0x61, 0x84, 0x21, 0x08, 0x00, 0x30, 0xCA,", "0x83, 0x04, 0x08, 0x20, 0x60, 
0x99, 0x8E ] FreeSerifItalic9pt7bGlyphs = [ [ 0,", "-11 ], # 0x40 '@' [ 343, 11, 11, 12, 0, -10 ],", "9, 1, -12 ], # 0x36 '6' [ 231, 9, 12, 9, 1,", "0xC3, 0x8F, 0x00, 0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82, 0x04, 0x1C,", "0x31, 0x04, 0x13, 0x01, 0x80, 0x70, 0xB7, 0xE0, 0x3F, 0xC1, 0x8C, 0x21, 0x8C,", "0xD8, 0xCC, 0xE1, 0xC3, 0x01, 0xE0, 0xBC, 0x82, 0x41, 0x31, 0x0F, 0x00, 0x38,", "0x60, 0x46, 0x04, 0x40, 0x8C, 0x08, 0xC0, 0x8C, 0x10, 0xE3, 0x03, 0xC0, 0xF8,", "0xD8, 0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08,", "], # 0x40 '@' [ 343, 11, 11, 12, 0, -10 ], #", "0x60, 0x81, 0x02, 0x04, 0x70, 0x38, 0x10, 0x10, 0x10, 0x37, 0x22, 0x24, 0x38,", "0x31, 0x39, 0x91, 0xCC, 0x93, 0x3B, 0x0E, 0x00, 0x1F, 0x80, 0x01, 0x00, 0x60,", "11, 12, 0, -10 ], # 0x41 'A' [ 359, 11, 12, 11,", "9, 12, 9, 0, -11 ], # 0x64 'd' [ 898, 7, 8,", "0xCE, 0x0F, 0x18, 0xD8, 0x7C, 0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31,", "0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0, 0x3E, 0x3C, 0x30, 0x60, 0x81,", "13, 2, -11 ], # 0x55 'U' [ 717, 11, 12, 12, 2,", "0x03, 0x03, 0xE0, 0x3F, 0xE4, 0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00,", "1, -7 ], # 0x3B '' [ 284, 9, 9, 10, 1, -8", "0xDE, 0x77, 0x39, 0x5E, 0xCC, 0xCC, 0xCE, 0x66, 0x62, 0x22, 0x11, 0x11, 0xB9,", "0x33, 0xAE, 0xE0, 0x0E, 0x65, 0x8B, 0x2F, 0x98, 0x31, 0x3C, 0x01, 0xE0, 0x40,", "7, 0, -7 ], # 0x72 'r' [ 1061, 7, 8, 6, 0,", "], # 0x53 'S' [ 682, 11, 12, 11, 2, -11 ], #", "0x83, 0x04, 0x08, 0x10, 0x60, 0x81, 0x02, 0x04, 0x70, 0x38, 0x10, 0x10, 0x10,", "-1, -7 ], # 0x78 'x' [ 1109, 9, 12, 9, 0, -7", "0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39, 0x91, 0xCC, 0x93, 0x3B, 0x0E,", "0xC0, 0x63, 0x82, 0x0C, 0x30, 0x31, 0x81, 0x8C, 0x0C, 0x40, 0x66, 0x07, 0x30,", "0x60, 0x20, 0x30, 0x18, 0x0C, 0x04, 0x36, 0x1E, 0x00, 0x3E, 0x78, 0x61, 0x82,", "0x23, 0x20, 0x90, 0x50, 0x28, 0x18, 0x08, 0x08, 0x08, 0x18, 0x00, 0x3F, 0x42,", "0, -11 ], # 0x48 'H' [ 489, 7, 12, 6, 0, -11", "0x27, 0x18, 0x27, 0x10, 0x42, 0x30, 0xF4, 0x7C, 0x38, 0x78, 0x60, 0x83, 0x04,", "8, 8, 9, 1, -7 ], # 0x75 'u' [ 1082, 7, 8,", "0x30, 0x31, 0x81, 0x8C, 0x0C, 0x40, 0x66, 0x07, 0x30, 0x31, 0x03, 0x18, 0x71,", "0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08, 0x04,", "4, 1, 6, 1, -3 ], # 0x2D '-' [ 124, 2, 2,", "-11 ], # 0x4B 'K' [ 534, 11, 12, 10, 0, -11 ],", "-11 ], # 0x2A '#' [ 111, 9, 9, 12, 1, -8 ],", "0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31, 0x8C, 0x43, 0x31, 0x88, 0x62,", "0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60, 0x06, 0x00, 0xB0, 0x13, 0x02, 0x18,", "0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0, 0x83, 0x04, 0x08, 0x10, 0x60,", "11, 12, 10, 0, -11 ], # 0x50 'P' [ 629, 11, 15,", "0x3F, 0xE4, 0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40, 0x18, 0x06,", "0x41, 0x06, 0x10, 0x21, 0xE1, 0x00, 0x07, 0x83, 0x18, 0xC1, 0xB0, 0x36, 0x07,", "0x30, 0x60, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06, 0x0C, 0x18, 0x30,", "93, 6, 15, 6, 0, -11 ], # 0x29 ')' [ 105, 6,", "2, -11 ], # 0x60 '`' [ 853, 9, 8, 9, 0, -7", "0x65, 0x8B, 0x2F, 0x98, 0x31, 0x3C, 0x01, 0xE0, 0x40, 0x08, 0x02, 0x00, 0x40,", "0x18, 0x20, 0xC1, 0x83, 0x04, 0x18, 0x30, 0x41, 0x87, 0x80, 0x0F, 0x81, 0x80,", "0x02, 0x30, 0x46, 0x04, 0x60, 0x46, 0x04, 0x40, 0x8C, 0x08, 0xC0, 0x8C, 0x10,", "0x1E, 0x00, 0x01, 0x83, 0x87, 0x07, 0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61,", "14, 12, 15, 1, -11 ], # 0x25 '%' [ 62, 12, 12,", "0x61, 0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8, 0x34, 0x05, 0x01, 0x80, 0x40,", "0x1E, 0x18, 0x20, 0xC1, 0x83, 0x04, 0x18, 0x30, 0x41, 0x87, 0x80, 0x0F, 0x81,", 
"977, 8, 12, 8, 0, -11 ], # 0x6B 'k' [ 989, 4,", "0x80, 0x60, 0x18, 0x0C, 0x06, 0x03, 0x03, 0x03, 0x1E, 0x00, 0x01, 0x83, 0x87,", "0x80, 0x30, 0x07, 0x00, 0x80, 0xFF, 0x80, 0x00, 0x00, 0x0F, 0xF8, 0xC0, 0x1C,", "12, 12, 11, 1, -11 ], # 0x43 'C' [ 394, 13, 12,", "0x22, 0x24, 0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73, 0x32, 0x26, 0x64, 0x4C, 0xDE,", "0x1C, 0x30, 0x40, 0x83, 0x04, 0x08, 0x20, 0x60, 0x99, 0x8E ] FreeSerifItalic9pt7bGlyphs =", "0x1B, 0x99, 0x4D, 0x26, 0x81, 0xC0, 0x70, 0x1C, 0x13, 0x49, 0xA4, 0xDA, 0xC7,", "0x5F '_' [ 851, 3, 3, 5, 2, -11 ], # 0x60 '`'", "0x40, 0x82, 0x04, 0x08, 0x1C, 0x00, 0x81, 0x04, 0x18, 0x20, 0xC1, 0x04, 0x08,", "0x40, 0x60, 0x20, 0x20, 0x10, 0x10, 0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66,", "# 0x74 't' [ 1074, 8, 8, 9, 1, -7 ], # 0x75", "1, -3 ], # 0x2D '-' [ 124, 2, 2, 5, 0, -1", "0x78, 0x60, 0x83, 0x04, 0x2C, 0x41, 0x22, 0x09, 0x10, 0x4D, 0x84, 0x28, 0x21,", "0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00, 0x1F,", "0xC6, 0x62, 0x70, 0x7F, 0xE9, 0x8E, 0x31, 0x04, 0x01, 0x80, 0x30, 0x06, 0x00,", "0x91, 0x87, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC,", "0x61 'a' [ 862, 9, 12, 9, 0, -11 ], # 0x62 'b'", "775, 10, 12, 11, 2, -11 ], # 0x59 'Y' [ 790, 11,", "0x08, 0x10, 0x60, 0x81, 0x02, 0x04, 0x70, 0x38, 0x10, 0x10, 0x10, 0x37, 0x22,", "0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30,", "0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40, 0x18, 0x06, 0x05, 0x81, 0x7F, 0xE0, 0x0E,", "], # 0x4F 'O' [ 612, 11, 12, 10, 0, -11 ], #", "0x02, 0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10, 0x08, 0x00, 0x56, 0xF0, 0xF0, 0x03,", "0x06, 0x0C, 0x18, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C, 0x78, 0x1E, 0x18, 0x20,", "13, 0, -11 ], # 0x48 'H' [ 489, 7, 12, 6, 0,", "0x0C, 0x00, 0x33, 0x00, 0x00, 0xCC, 0x33, 0x00, 0x00, 0x44, 0x48, 0x01, 0x83,", "[ 414, 12, 12, 10, 0, -11 ], # 0x45 'E' [ 432,", "12, 13, 0, -11 ], # 0x44 'D' [ 414, 12, 12, 10,", "0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66, 0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3, 0x87,", "432, 12, 12, 10, 0, -11 ], # 0x46 'F' [ 450, 12,", "0x3B, 0x87, 0x31, 0x8C, 0x43, 0x31, 0x88, 0x62, 0x30, 0xF0, 0x60, 0x10, 0x04,", "0x02, 0x00, 0x60, 0x0F, 0x80, 0x07, 0x91, 0x87, 0x30, 0x26, 0x02, 0x60, 0x0C,", "0xE6, 0x3F, 0x03, 0x03, 0x06, 0x0C, 0x00, 0x33, 0x00, 0x00, 0xCC, 0x33, 0x00,", "0xC4, 0xC2, 0x63, 0x33, 0xAE, 0xE0, 0x0E, 0x65, 0x8B, 0x2F, 0x98, 0x31, 0x3C,", "-12 ], # 0x7D ']' [ 1160, 8, 3, 10, 1, -5 ]", "0x07, 0x91, 0x86, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0xC0,", "'6' [ 231, 9, 12, 9, 1, -11 ], # 0x37 '7' [", "0x30 '0' [ 152, 6, 13, 9, 1, -12 ], # 0x31 '1'", "0x77, 0x3B, 0x33, 0x62, 0x62, 0x42, 0x4D, 0xCE, 0x0F, 0x18, 0xD8, 0x7C, 0x3C,", "0xC0, 0x0E, 0x19, 0x8C, 0x6C, 0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03,", "0x8F, 0x80, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86,", "0x03, 0x02, 0x06, 0x0C, 0x08, 0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60,", "0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4, 0x19, 0x86, 0x7F, 0x80, 0x07, 0x91,", "0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0, 0x83, 0x04, 0x08, 0x10, 0x60, 0x81, 0x02,", "0x0F, 0x18, 0x98, 0x4C, 0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04, 0x02, 0x03, 0x03,", "0x05, 0x84, 0x26, 0x20, 0x99, 0x84, 0x3C, 0x03, 0x80, 0x6C, 0x06, 0xC0, 0x78,", "0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C, 0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3,", "-1, -11 ], # 0x6A 'j' [ 977, 8, 12, 8, 0, -11", "0x1C, 0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0, 0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40,", "0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x00, 0x60, 0x0F, 0x80, 0x07, 0x91,", 
"0x28, 0x21, 0x41, 0x06, 0x10, 0x21, 0xE1, 0x00, 0x07, 0x83, 0x18, 0xC1, 0xB0,", "323, 13, 12, 14, 1, -11 ], # 0x40 '@' [ 343, 11,", "] # 0x7E '~' FreeSerifItalic9pt7b = [ FreeSerifItalic9pt7bBitmaps, FreeSerifItalic9pt7bGlyphs, 0x20, 0x7E, 22 ]", "0x40, 0xC0, 0x0E, 0x0C, 0x8C, 0x6C, 0x36, 0x1F, 0x0F, 0x07, 0x87, 0xC3, 0x61,", "], # 0x28 '(' [ 93, 6, 15, 6, 0, -11 ], #", "0x27, 0x10, 0x42, 0x30, 0xF4, 0x7C, 0x38, 0x78, 0x60, 0x83, 0x04, 0x2C, 0x41,", "], # 0x31 '1' [ 162, 8, 12, 9, 1, -11 ], #", "0x00, 0x0F, 0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39,", "12, 9, 1, -11 ], # 0x32 '2' [ 174, 9, 12, 9,", "0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4, 0x19,", "12, 8, 0, -7 ], # 0x67 'g' [ 943, 9, 12, 9,", "0x0F, 0xF8, 0xC0, 0x1C, 0x03, 0x80, 0x70, 0x18, 0x38, 0x70, 0xC0, 0x80, 0x00,", "-12 ], # 0x30 '0' [ 152, 6, 13, 9, 1, -12 ],", "0x10, 0x10, 0x10, 0x37, 0x22, 0x24, 0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73, 0x32,", "0x58, 0x06, 0x60, 0x33, 0x01, 0x0C, 0x18, 0x61, 0xE7, 0xC0, 0x3E, 0x01, 0x80,", "0x8E, 0x77, 0x3B, 0x33, 0x62, 0x62, 0x42, 0x4D, 0xCE, 0x0F, 0x18, 0xD8, 0x7C,", "14, 12, 13, 0, -11 ], # 0x48 'H' [ 489, 7, 12,", "0, -7 ], # 0x72 'r' [ 1061, 7, 8, 6, 0, -7", "], # 0x66 'f' [ 929, 9, 12, 8, 0, -7 ], #", "9, 0, -7 ], # 0x61 'a' [ 862, 9, 12, 9, 0,", "'E' [ 432, 12, 12, 10, 0, -11 ], # 0x46 'F' [", "534, 11, 12, 10, 0, -11 ], # 0x4C 'L' [ 551, 16,", "0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19,", "15, 1, -11 ], # 0x25 '%' [ 62, 12, 12, 14, 1,", "0x08, 0x08, 0x04, 0x02, 0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10, 0x08, 0x00, 0x56,", "0x60, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x60,", "0xAE, 0xE0, 0x0E, 0x65, 0x8B, 0x2F, 0x98, 0x31, 0x3C, 0x01, 0xE0, 0x40, 0x08,", "0x00, 0xB0, 0x13, 0x02, 0x18, 0x61, 0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8,", "0x6F 'o' [ 1025, 10, 12, 8, -1, -7 ], # 0x70 'p'", "0xF3, 0x02, 0x30, 0x46, 0x04, 0x60, 0x46, 0x04, 0x40, 0x8C, 0x08, 0xC0, 0x8C,", "[ 500, 9, 12, 8, 0, -11 ], # 0x4A 'J' [ 514,", "0x66, 0x62, 0x22, 0x11, 0x11, 0xB9, 0x8E, 0x77, 0x3B, 0x33, 0x62, 0x62, 0x42,", "12, 11, 0, -11 ], # 0x42 'B' [ 376, 12, 12, 11,", "0x20, 0x40, 0x80, 0x06, 0x00, 0x0F, 0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64,", "0x30, 0x46, 0x04, 0x60, 0x46, 0x04, 0x40, 0x8C, 0x08, 0xC0, 0x8C, 0x10, 0xE3,", "], # 0x5A 'Z' [ 807, 7, 15, 7, 0, -11 ], #", "0x91, 0x8E, 0x70, 0x88, 0x46, 0x23, 0x20, 0x90, 0x50, 0x28, 0x18, 0x08, 0x08,", "[ 1144, 1, 12, 5, 2, -11 ], # 0x7C '|' [ 1146,", "12, 10, 0, -11 ], # 0x5A 'Z' [ 807, 7, 15, 7,", "-11 ], # 0x54 'T' [ 699, 12, 12, 13, 2, -11 ],", "0x14, 0x04, 0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F, 0x80,", "0x3E, 0x03, 0x00, 0x40, 0x08, 0x01, 0x00, 0x60, 0x0C, 0x01, 0x00, 0x20, 0x04,", "4, 0, -8 ], # 0x74 't' [ 1074, 8, 8, 9, 1,", "0, -11 ], # 0x68 'h' [ 957, 4, 12, 4, 1, -11", "0xF8, 0x40, 0x20, 0x10, 0x08, 0x00, 0x56, 0xF0, 0xF0, 0x03, 0x02, 0x06, 0x04,", "0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60, 0x06, 0x00, 0xB0, 0x13, 0x02,", "0x26, 0x33, 0x38, 0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0, 0x76, 0x50, 0xC1, 0x06,", "0, 4, 12, 6, 1, -11 ], # 0x21 '!' 
[ 6, 5,", "0xB2, 0x0D, 0x24, 0x1C, 0x50, 0x38, 0xA0, 0x21, 0x80, 0x42, 0x01, 0x04, 0x00,", "6, 15, 6, 0, -11 ], # 0x29 ')' [ 105, 6, 8,", "0x3E '>' [ 312, 7, 12, 8, 2, -11 ], # 0x3F '?'", "0x81, 0x41, 0x61, 0x21, 0x11, 0x18, 0x88, 0xFF, 0x02, 0x03, 0x01, 0x00, 0x0F,", "0x04, 0x60, 0x8F, 0xF8, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8,", "9, 5, 12, 2, -6 ], # 0x3D '=' [ 301, 9, 9,", "0x3F, 0x03, 0x03, 0x06, 0x0C, 0x00, 0x33, 0x00, 0x00, 0xCC, 0x33, 0x00, 0x00,", "0x28 '(' [ 93, 6, 15, 6, 0, -11 ], # 0x29 ')'", "# 0x76 'v' [ 1089, 11, 8, 12, 1, -7 ], # 0x77", "0x10, 0x60, 0x81, 0x02, 0x04, 0x70, 0x38, 0x10, 0x10, 0x10, 0x37, 0x22, 0x24,", "'w' [ 1100, 9, 8, 8, -1, -7 ], # 0x78 'x' [", "0x26 '&' [ 80, 2, 4, 4, 3, -11 ], # 0x27 '''", "0x08, 0x02, 0x00, 0x40, 0x3E, 0x03, 0x00, 0x40, 0x08, 0x01, 0x00, 0x60, 0x0C,", "0x22, 0x09, 0x10, 0x4D, 0x84, 0x28, 0x21, 0x41, 0x06, 0x10, 0x21, 0xE1, 0x00,", "0xE0, 0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0, 0x0E, 0x19, 0x8C, 0x6C,", "0, -11 ], # 0x46 'F' [ 450, 12, 12, 12, 1, -11", "0xE0, 0x38, 0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F, 0x00, 0x01, 0x01, 0x81, 0x41,", "0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0, 0x50, 0xCC, 0xC3, 0x61,", "0x6E 'n' [ 1016, 9, 8, 9, 0, -7 ], # 0x6F 'o'", "0x8C, 0x30, 0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00, 0x10, 0x40, 0x82, 0x0C, 0x30,", "8, 9, 3, -11 ], # 0x2A '#' [ 111, 9, 9, 12,", "'y' [ 1123, 8, 9, 7, 0, -7 ], # 0x7A 'z' [", "0x1E, 0x06, 0xC0, 0xD8, 0x31, 0x04, 0x13, 0x01, 0x80, 0x70, 0xB7, 0xE0, 0x3F,", "0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03, 0x06, 0x0C, 0x00, 0x33, 0x00, 0x00,", "0x31, 0x87, 0xE1, 0x80, 0x30, 0x04, 0x01, 0x80, 0x78, 0x00, 0x07, 0x83, 0x18,", "0x00, 0x3E, 0x78, 0x61, 0x82, 0x10, 0x31, 0x01, 0xB0, 0x0E, 0x00, 0x58, 0x06,", "0x60, 0x99, 0x8E ] FreeSerifItalic9pt7bGlyphs = [ [ 0, 0, 0, 5, 0,", "-7 ], # 0x6F 'o' [ 1025, 10, 12, 8, -1, -7 ],", "0x80, 0xD8, 0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38,", "0x60, 0x20, 0xF1, 0x89, 0x8C, 0xC4, 0xC2, 0x63, 0x33, 0xAE, 0xE0, 0x0E, 0x65,", "0x41, 0x38, 0x20, 0x82, 0x08, 0x41, 0x04, 0x10, 0xC2, 0x08, 0x20, 0x8C, 0x00,", "0x21, 0x80, 0x42, 0x01, 0x04, 0x00, 0x3E, 0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07,", "0x60, 0x0C, 0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0, 0x3E,", "0xC2, 0x63, 0x33, 0xAE, 0xE0, 0x0E, 0x65, 0x8B, 0x2F, 0x98, 0x31, 0x3C, 0x01,", "0x31, 0x8C, 0x1E, 0x00, 0x3F, 0xC1, 0x9C, 0x21, 0x8C, 0x31, 0x86, 0x31, 0x87,", "# 0x6D 'm' [ 1008, 8, 8, 9, 0, -7 ], # 0x6E", "0x11, 0x03, 0x10, 0x30, 0x02, 0x00, 0x60, 0x0F, 0x80, 0x07, 0x91, 0x87, 0x30,", "0x00, 0x33, 0x00, 0x00, 0xCC, 0x33, 0x00, 0x00, 0x44, 0x48, 0x01, 0x83, 0x86,", "# 0x45 'E' [ 432, 12, 12, 10, 0, -11 ], # 0x46", "0x0C, 0x8C, 0x6C, 0x36, 0x1F, 0x0F, 0x07, 0x87, 0xC3, 0x61, 0xB1, 0x88, 0x83,", "], # 0x5B '[' [ 821, 6, 12, 9, 2, -11 ], #", "9, 1, -11 ], # 0x32 '2' [ 174, 9, 12, 9, 0,", "1, -11 ], # 0x32 '2' [ 174, 9, 12, 9, 0, -11", "# 0x68 'h' [ 957, 4, 12, 4, 1, -11 ], # 0x69", "0xF0, 0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06,", "'.' 
[ 125, 8, 12, 5, 0, -11 ], # 0x2F '/' [", "-11 ], # 0x6B 'k' [ 989, 4, 12, 5, 1, -11 ],", "8, 8, 1, -7 ], # 0x76 'v' [ 1089, 11, 8, 12,", "[ 275, 4, 8, 4, 1, -7 ], # 0x3A ':' [ 279,", "0x5C, 0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C,", "0, -11 ], # 0x5B '[' [ 821, 6, 12, 9, 2, -11", "0x30, 0xF0, 0x60, 0x10, 0x04, 0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C, 0x2C, 0x26,", "[ 821, 6, 12, 9, 2, -11 ], # 0x5C '\\' [ 830,", "3, -11 ], # 0x27 ''' [ 81, 6, 15, 6, 1, -11", "0x8C, 0x6C, 0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03, 0x06, 0x0C, 0x00,", "[ 9, 10, 12, 9, 0, -11 ], # 0x23 '#' [ 24,", "], # 0x6E 'n' [ 1016, 9, 8, 9, 0, -7 ], #", "0x8E ] FreeSerifItalic9pt7bGlyphs = [ [ 0, 0, 0, 5, 0, 1 ],", "9, 12, 9, 0, -7 ], # 0x71 'q' [ 1054, 7, 8,", "0x75 'u' [ 1082, 7, 8, 8, 1, -7 ], # 0x76 'v'", "0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8,", "9, 12, 9, 0, -11 ], # 0x34 '4' [ 202, 9, 12,", "1, -8 ], # 0x3C '<' [ 295, 9, 5, 12, 2, -6", "0x80, 0x70, 0xB7, 0xE0, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3F, 0x04,", "12, 10, 0, -11 ], # 0x4C 'L' [ 551, 16, 12, 15,", "0x81, 0x80, 0x80, 0xC0, 0x60, 0x20, 0x30, 0x18, 0x0C, 0x04, 0x36, 0x1E, 0x00,", "0x1B, 0x8F, 0x80, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61,", "], # 0x7B '[' [ 1144, 1, 12, 5, 2, -11 ], #", "0x98, 0x31, 0x3C, 0x01, 0xE0, 0x40, 0x08, 0x02, 0x00, 0x40, 0x3E, 0x03, 0x00,", "0x10, 0x4D, 0x84, 0x28, 0x21, 0x41, 0x06, 0x10, 0x21, 0xE1, 0x00, 0x07, 0x83,", "1, -11 ], # 0x28 '(' [ 93, 6, 15, 6, 0, -11", "174, 9, 12, 9, 0, -11 ], # 0x33 '3' [ 188, 9,", "# 0x59 'Y' [ 790, 11, 12, 10, 0, -11 ], # 0x5A", "0x62, 0x22, 0x11, 0x11, 0xB9, 0x8E, 0x77, 0x3B, 0x33, 0x62, 0x62, 0x42, 0x4D,", "# 0x5C '\\' [ 830, 6, 15, 7, 1, -11 ], # 0x5D", "-11 ], # 0x57 'W' [ 757, 12, 12, 12, 0, -11 ],", "0x60, 0x83, 0xF0, 0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F,", "0x34, 0x05, 0x01, 0x80, 0x40, 0x30, 0x0C, 0x03, 0x03, 0xE0, 0x3F, 0xE4, 0x19,", "'~' FreeSerifItalic9pt7b = [ FreeSerifItalic9pt7bBitmaps, FreeSerifItalic9pt7bGlyphs, 0x20, 0x7E, 22 ] # Approx. 
1835", "0x26, 0x81, 0xC0, 0x70, 0x1C, 0x13, 0x49, 0xA4, 0xDA, 0xC7, 0xC1, 0x00, 0x80,", "0x0C, 0x60, 0x83, 0xF0, 0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x18, 0x30,", "0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0, 0x76, 0x50, 0xC1, 0x06, 0x08, 0x10, 0x60,", "0x90, 0x50, 0x28, 0x18, 0x08, 0x08, 0x08, 0x18, 0x00, 0x3F, 0x42, 0x04, 0x08,", "0x84, 0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0, 0x38, 0x07, 0x00,", "0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C, 0x9D, 0xEE, 0x62, 0xC4, 0x89,", "15, 7, 0, -11 ], # 0x5B '[' [ 821, 6, 12, 9,", "0x01, 0x00, 0x60, 0x14, 0x04, 0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04, 0x20, 0x88,", "9, 9, 12, 1, -8 ], # 0x2B '+' [ 122, 2, 4,", "0, -8 ], # 0x74 't' [ 1074, 8, 8, 9, 1, -7", "0x5B '[' [ 821, 6, 12, 9, 2, -11 ], # 0x5C '\\'", "0x5E '^' [ 849, 9, 1, 9, 0, 2 ], # 0x5F '_'", "17, 8, -1, -12 ], # 0x66 'f' [ 929, 9, 12, 8,", "0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0, 0x63, 0x82, 0x0C, 0x30, 0x31, 0x81, 0x8C,", "# 0x2F '/' [ 137, 9, 13, 9, 1, -12 ], # 0x30", "0x47, 0x0C, 0x10, 0xE2, 0x2C, 0x44, 0xD8, 0x9D, 0x23, 0xA4, 0x65, 0x0C, 0xC1,", "0x18, 0x38, 0x70, 0xC0, 0x80, 0x00, 0x3C, 0x8C, 0x18, 0x30, 0xC3, 0x0C, 0x20,", "1, -11 ], # 0x40 '@' [ 343, 11, 11, 12, 0, -10", "0xC0, 0x0E, 0x0C, 0x8C, 0x6C, 0x36, 0x1F, 0x0F, 0x07, 0x87, 0xC3, 0x61, 0xB1,", "[ 1082, 7, 8, 8, 1, -7 ], # 0x76 'v' [ 1089,", "'+' [ 122, 2, 4, 5, 0, -1 ], # 0x2C ',' [", "0x80, 0x06, 0x00, 0x0F, 0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27,", "0x40, 0x18, 0x06, 0x05, 0x81, 0x7F, 0xE0, 0x0E, 0x10, 0x20, 0x81, 0x02, 0x04,", "12, 9, 0, -11 ], # 0x62 'b' [ 876, 8, 8, 7,", "0x01, 0xB0, 0x0E, 0x00, 0x58, 0x06, 0x60, 0x33, 0x01, 0x0C, 0x18, 0x61, 0xE7,", "# 0x56 'V' [ 734, 15, 12, 16, 2, -11 ], # 0x57", "'_' [ 851, 3, 3, 5, 2, -11 ], # 0x60 '`' [", "0x0C, 0x43, 0x10, 0xC6, 0x62, 0x70, 0x7F, 0xE9, 0x8E, 0x31, 0x04, 0x01, 0x80,", "0x60, 0x08, 0x00, 0xFB, 0xCE, 0x43, 0x0C, 0x86, 0x11, 0x8C, 0x43, 0x38, 0x86,", "12, 11, 2, -11 ], # 0x59 'Y' [ 790, 11, 12, 10,", "0x40, 0x80, 0x06, 0x00, 0x0F, 0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2,", "], # 0x36 '6' [ 231, 9, 12, 9, 1, -11 ], #", "0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0, 0x38, 0x0C, 0x06, 0x03,", "0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11, 0x04, 0x83, 0x20, 0x1C, 0x1B, 0x99, 0x4D,", "'@' [ 343, 11, 11, 12, 0, -10 ], # 0x41 'A' [", "0x16, 0x4C, 0x26, 0x48, 0x26, 0x98, 0x27, 0x18, 0x27, 0x10, 0x42, 0x30, 0xF4,", "0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31, 0x8C, 0x43, 0x31, 0x88, 0x62, 0x30,", "12, 12, 10, 0, -11 ], # 0x45 'E' [ 432, 12, 12,", "0x63, 0x82, 0x0C, 0x30, 0x31, 0x81, 0x8C, 0x0C, 0x40, 0x66, 0x07, 0x30, 0x31,", "], # 0x2E '.' 
[ 125, 8, 12, 5, 0, -11 ], #", "# 0x7B '[' [ 1144, 1, 12, 5, 2, -11 ], # 0x7C", "10, 0, -11 ], # 0x45 'E' [ 432, 12, 12, 10, 0,", "11, 12, 11, 2, -11 ], # 0x54 'T' [ 699, 12, 12,", "830, 6, 15, 7, 1, -11 ], # 0x5D ']' [ 842, 8,", "], # 0x48 'H' [ 489, 7, 12, 6, 0, -11 ], #", "0x11, 0xB8, 0xE0, 0x30, 0x00, 0xE2, 0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00,", "# 0x7C '|' [ 1146, 7, 16, 7, 0, -12 ], # 0x7D", "468, 14, 12, 13, 0, -11 ], # 0x48 'H' [ 489, 7,", "[ 295, 9, 5, 12, 2, -6 ], # 0x3D '=' [ 301,", "[ 24, 9, 15, 9, 1, -12 ], # 0x24 '$' [ 41,", "4, 8, 4, 1, -7 ], # 0x3A ':' [ 279, 4, 10,", "0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C, 0x23, 0xC8, 0xC4, 0x21, 0x18,", "[ 123, 4, 1, 6, 1, -3 ], # 0x2D '-' [ 124,", "0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C, 0x23, 0xC8, 0xC4, 0x21,", "0xFC, 0x24, 0x11, 0x04, 0x83, 0x20, 0x1C, 0x1B, 0x99, 0x4D, 0x26, 0x81, 0xC0,", "[ 1100, 9, 8, 8, -1, -7 ], # 0x78 'x' [ 1109,", "0x78, 0x06, 0x01, 0xEF, 0x66, 0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3, 0x87, 0xCE,", "'#' [ 111, 9, 9, 12, 1, -8 ], # 0x2B '+' [", "[ 884, 9, 12, 9, 0, -11 ], # 0x64 'd' [ 898,", "0xFF, 0x02, 0x03, 0x01, 0x00, 0x0F, 0x84, 0x04, 0x03, 0x80, 0x60, 0x18, 0x0C,", "0x3C, 0x01, 0xE0, 0x40, 0x08, 0x02, 0x00, 0x40, 0x3E, 0x03, 0x00, 0x40, 0x08,", "0x86, 0xC3, 0x61, 0xB1, 0x88, 0xC3, 0xC0, 0x7F, 0x40, 0x80, 0x80, 0x40, 0x40,", "7, 1, -11 ], # 0x5D ']' [ 842, 8, 7, 8, 0,", "0x30, 0x41, 0x02, 0x00, 0x10, 0x40, 0x82, 0x0C, 0x30, 0xC3, 0x0C, 0x61, 0x84,", "0x81, 0x90, 0x14, 0x03, 0x00, 0x60, 0x08, 0x00, 0xFB, 0xCE, 0x43, 0x0C, 0x86,", "0x5C '\\' [ 830, 6, 15, 7, 1, -11 ], # 0x5D ']'", "0, -11 ], # 0x5E '^' [ 849, 9, 1, 9, 0, 2", "[ 1008, 8, 8, 9, 0, -7 ], # 0x6E 'n' [ 1016,", "0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0, 0x63, 0x82, 0x0C, 0x30, 0x31, 0x81,", "0x08, 0x20, 0x41, 0x38, 0x20, 0x82, 0x08, 0x41, 0x04, 0x10, 0xC2, 0x08, 0x20,", "0x8E, 0x70, 0x88, 0x46, 0x23, 0x20, 0x90, 0x50, 0x28, 0x18, 0x08, 0x08, 0x08,", "-11 ], # 0x22 '\"' [ 9, 10, 12, 9, 0, -11 ],", "0x9D, 0x23, 0xA4, 0x65, 0x0C, 0xC1, 0x10, 0x19, 0x95, 0x43, 0x01, 0x80, 0xC0,", "'D' [ 414, 12, 12, 10, 0, -11 ], # 0x45 'E' [", "0x0F, 0x18, 0xD8, 0x7C, 0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31, 0x8C,", "0x0D, 0x83, 0x7F, 0xE0, 0x1C, 0x07, 0x0C, 0x0E, 0x0C, 0x14, 0x14, 0x1C, 0x14,", "0x6C, 0x06, 0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66, 0x24, 0x24, 0xC3, 0x8C, 0x10,", "0, -11 ], # 0x4E 'N' [ 595, 11, 12, 12, 1, -11", "0, -11 ], # 0x5A 'Z' [ 807, 7, 15, 7, 0, -11", "0x10, 0x31, 0x01, 0xB0, 0x0E, 0x00, 0x58, 0x06, 0x60, 0x33, 0x01, 0x0C, 0x18,", "[ 0x11, 0x12, 0x22, 0x24, 0x40, 0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82, 0x20,", "8, 3, 10, 1, -5 ] ] # 0x7E '~' FreeSerifItalic9pt7b = [", "0x01, 0xE0, 0x40, 0x08, 0x02, 0x00, 0x40, 0x3E, 0x03, 0x00, 0x40, 0x08, 0x01,", "0x10, 0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E, 0x0C, 0x8C, 0x6C, 0x36, 0x1F, 0x0F,", "[ 231, 9, 12, 9, 1, -11 ], # 0x37 '7' [ 245,", "2, 5, 0, -1 ], # 0x2E '.' 
[ 125, 8, 12, 5,", "2, -11 ], # 0x7C '|' [ 1146, 7, 16, 7, 0, -12", "], # 0x37 '7' [ 245, 9, 13, 9, 1, -12 ], #", "12, 10, 0, -11 ], # 0x50 'P' [ 629, 11, 15, 12,", "0x21, 0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02, 0x06, 0x0C,", "1089, 11, 8, 12, 1, -7 ], # 0x77 'w' [ 1100, 9,", "0xD8, 0x9D, 0x23, 0xA4, 0x65, 0x0C, 0xC1, 0x10, 0x19, 0x95, 0x43, 0x01, 0x80,", "0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4,", "14, 1, -11 ], # 0x40 '@' [ 343, 11, 11, 12, 0,", "0x30, 0xC3, 0x8F, 0x00, 0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82, 0x04,", "3, -11 ], # 0x2A '#' [ 111, 9, 9, 12, 1, -8", "'2' [ 174, 9, 12, 9, 0, -11 ], # 0x33 '3' [", "0x03, 0x10, 0x30, 0x02, 0x00, 0x60, 0x0F, 0x80, 0x07, 0x91, 0x87, 0x30, 0x26,", "], # 0x74 't' [ 1074, 8, 8, 9, 1, -7 ], #", "0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4, 0x19, 0x86,", "0x5D ']' [ 842, 8, 7, 8, 0, -11 ], # 0x5E '^'", "0x69 'i' [ 963, 7, 16, 5, -1, -11 ], # 0x6A 'j'", "1, -11 ], # 0x43 'C' [ 394, 13, 12, 13, 0, -11", "0x01, 0x80, 0x70, 0xB7, 0xE0, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3F,", "0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0, 0x63, 0x82, 0x0C,", "'$' [ 41, 14, 12, 15, 1, -11 ], # 0x25 '%' [", "0xC1, 0x82, 0x04, 0x1C, 0x30, 0x40, 0x83, 0x04, 0x08, 0x20, 0x60, 0x99, 0x8E", "9, 9, 10, 1, -8 ], # 0x3E '>' [ 312, 7, 12,", "0x86, 0x7F, 0x80, 0x07, 0x91, 0x86, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC0,", "0xC0, 0xC4, 0x78, 0x01, 0x80, 0x40, 0x60, 0x20, 0xF1, 0x89, 0x8C, 0xC4, 0xC2,", "# 0x79 'y' [ 1123, 8, 9, 7, 0, -7 ], # 0x7A", "0x88, 0x62, 0x30, 0xF0, 0x60, 0x10, 0x04, 0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C,", "4, 1, -7 ], # 0x3B '' [ 284, 9, 9, 10, 1,", "9, 8, 8, -1, -7 ], # 0x78 'x' [ 1109, 9, 12,", "0x3F, 0xC0, 0x63, 0x82, 0x0C, 0x30, 0x31, 0x81, 0x8C, 0x0C, 0x40, 0x66, 0x07,", "81, 6, 15, 6, 1, -11 ], # 0x28 '(' [ 93, 6,", "0x08, 0x61, 0x04, 0x30, 0xC3, 0x8F, 0x00, 0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20,", "0x00, 0x56, 0xF0, 0xF0, 0x03, 0x02, 0x06, 0x04, 0x08, 0x08, 0x10, 0x30, 0x20,", "0x0C, 0x20, 0x40, 0x80, 0x06, 0x00, 0x0F, 0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC,", "0xC0, 0x8C, 0x10, 0xE3, 0x03, 0xC0, 0xF8, 0xEC, 0x0C, 0x81, 0x18, 0x43, 0x08,", "0x26, 0x20, 0x99, 0x84, 0x3C, 0x03, 0x80, 0x6C, 0x06, 0xC0, 0x78, 0x06, 0x01,", "0x26, 0x02, 0x60, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83,", "0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0, 0x3E, 0x3C, 0x30, 0x60,", "0x18, 0x20, 0xC1, 0x04, 0x08, 0x20, 0x41, 0x38, 0x20, 0x82, 0x08, 0x41, 0x04,", "[ 551, 16, 12, 15, 0, -11 ], # 0x4D 'M' [ 575,", "], # 0x64 'd' [ 898, 7, 8, 7, 0, -7 ], #", "0x2F, 0x98, 0x31, 0x3C, 0x01, 0xE0, 0x40, 0x08, 0x02, 0x00, 0x40, 0x3E, 0x03,", "], # 0x78 'x' [ 1109, 9, 12, 9, 0, -7 ], #", "0x4C 'L' [ 551, 16, 12, 15, 0, -11 ], # 0x4D 'M'", "# 0x4B 'K' [ 534, 11, 12, 10, 0, -11 ], # 0x4C", "1, -11 ], # 0x4F 'O' [ 612, 11, 12, 10, 0, -11", "0x36 '6' [ 231, 9, 12, 9, 1, -11 ], # 0x37 '7'", "0, -11 ], # 0x4A 'J' [ 514, 13, 12, 12, 0, -11", "0x31, 0x01, 0xB0, 0x0E, 0x00, 0x58, 0x06, 0x60, 0x33, 0x01, 0x0C, 0x18, 0x61,", "], # 0x68 'h' [ 957, 4, 12, 4, 1, -11 ], #", "1, 12, 5, 2, -11 ], # 0x7C '|' [ 1146, 7, 16,", "12, 9, 2, -11 ], # 0x5C '\\' [ 830, 6, 15, 7,", "376, 12, 12, 11, 1, -11 ], # 0x43 'C' [ 394, 13,", "0x46, 0x04, 0x60, 0x46, 0x04, 0x40, 0x8C, 0x08, 0xC0, 0x8C, 0x10, 0xE3, 0x03,", "[ 977, 8, 12, 8, 0, -11 ], # 0x6B 'k' [ 989,", "0xFA, 0x08, 0x21, 0x08, 0x61, 0x8C, 0x30, 0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00,", "0x86, 0x30, 0xC4, 0x19, 0x86, 0x7F, 0x80, 0x07, 0x91, 
0x86, 0x30, 0x26, 0x02,", "0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08, 0x04, 0x02, 0x03, 0x71,", "0xD1, 0xB3, 0x5C, 0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6,", "0x86, 0x8C, 0x9D, 0xEE, 0x62, 0xC4, 0x89, 0xA3, 0x47, 0x0C, 0x10, 0xE2, 0x2C,", "0x04, 0x60, 0x46, 0x04, 0x40, 0x8C, 0x08, 0xC0, 0x8C, 0x10, 0xE3, 0x03, 0xC0,", "0x39, 0x91, 0xCC, 0x93, 0x3B, 0x0E, 0x00, 0x1F, 0x80, 0x01, 0x00, 0x60, 0x14,", "0x44, 0xD8, 0x9D, 0x23, 0xA4, 0x65, 0x0C, 0xC1, 0x10, 0x19, 0x95, 0x43, 0x01,", "# 0x20 ' ' [ 0, 4, 12, 6, 1, -11 ], #", "0x07, 0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61, 0xB1, 0x88, 0xC3, 0xC0, 0x7F,", "0x46, 0x04, 0x40, 0x8C, 0x08, 0xC0, 0x8C, 0x10, 0xE3, 0x03, 0xC0, 0xF8, 0xEC,", "0x00, 0x80, 0x1C, 0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0, 0xC5, 0x03, 0x34, 0xE7,", "0x08, 0x08, 0x10, 0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E, 0x0C, 0x8C, 0x6C, 0x36,", "[ 301, 9, 9, 10, 1, -8 ], # 0x3E '>' [ 312,", "0xC1, 0x98, 0x31, 0x84, 0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0,", "0x04, 0x01, 0x80, 0x30, 0x06, 0x00, 0x80, 0x30, 0x06, 0x00, 0x80, 0x7E, 0x00,", "0x00, 0x00, 0x44, 0x48, 0x01, 0x83, 0x86, 0x1C, 0x0C, 0x03, 0x80, 0x30, 0x07,", "-8 ], # 0x2B '+' [ 122, 2, 4, 5, 0, -1 ],", "0x01, 0x00, 0x0F, 0x84, 0x04, 0x03, 0x80, 0x60, 0x18, 0x0C, 0x06, 0x03, 0x03,", "0x83, 0x03, 0x02, 0x06, 0x0C, 0x08, 0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0,", "], # 0x34 '4' [ 202, 9, 12, 9, 0, -11 ], #", "682, 11, 12, 11, 2, -11 ], # 0x54 'T' [ 699, 12,", "0x07, 0x0C, 0x0E, 0x0C, 0x14, 0x14, 0x1C, 0x14, 0x2C, 0x16, 0x4C, 0x26, 0x48,", "13, 8, 13, 0, -7 ], # 0x6D 'm' [ 1008, 8, 8,", "12, 9, 0, -11 ], # 0x68 'h' [ 957, 4, 12, 4,", "1, -11 ], # 0x47 'G' [ 468, 14, 12, 13, 0, -11", "0x21, 0x11, 0x18, 0x88, 0xFF, 0x02, 0x03, 0x01, 0x00, 0x0F, 0x84, 0x04, 0x03,", "12, 14, 1, -11 ], # 0x26 '&' [ 80, 2, 4, 4,", "0x20, 0x30, 0x18, 0x0C, 0x04, 0x36, 0x1E, 0x00, 0x3E, 0x78, 0x61, 0x82, 0x10,", "0x03, 0x1E, 0x00, 0x01, 0x83, 0x87, 0x07, 0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3,", "0xC0, 0xD1, 0xB3, 0x5C, 0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42,", "275, 4, 8, 4, 1, -7 ], # 0x3A ':' [ 279, 4,", "0x06, 0x00, 0xB0, 0x13, 0x02, 0x18, 0x61, 0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10,", "0xE0, 0x30, 0x00, 0xE2, 0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0, 0x83,", "# 0x50 'P' [ 629, 11, 15, 12, 1, -11 ], # 0x51", "0x08, 0x62, 0x0C, 0x81, 0x90, 0x14, 0x03, 0x00, 0x60, 0x08, 0x00, 0xFB, 0xCE,", "0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06, 0x0C, 0x18,", "0x0F, 0x84, 0x04, 0x03, 0x80, 0x60, 0x18, 0x0C, 0x06, 0x03, 0x03, 0x03, 0x1E,", "0x83, 0xE0, 0x3F, 0xC0, 0x63, 0x82, 0x0C, 0x30, 0x31, 0x81, 0x8C, 0x0C, 0x40,", "12, 5, 2, -11 ], # 0x7C '|' [ 1146, 7, 16, 7,", "0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8, 0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36,", "], # 0x3B '' [ 284, 9, 9, 10, 1, -8 ], #", "0x84, 0x04, 0x03, 0x80, 0x60, 0x18, 0x0C, 0x06, 0x03, 0x03, 0x03, 0x1E, 0x00,", "1, 9, 0, 2 ], # 0x5F '_' [ 851, 3, 3, 5,", "0x04, 0x18, 0x20, 0xC1, 0x04, 0x08, 0x20, 0x41, 0x38, 0x20, 0x82, 0x08, 0x41,", "0x31, 0x8C, 0x43, 0x31, 0x88, 0x62, 0x30, 0xF0, 0x60, 0x10, 0x04, 0x03, 0x80,", "260, 9, 13, 9, 0, -12 ], # 0x39 '9' [ 275, 4,", "-12 ], # 0x66 'f' [ 929, 9, 12, 8, 0, -7 ],", "], # 0x72 'r' [ 1061, 7, 8, 6, 0, -7 ], #", "0xC1, 0x04, 0x08, 0x20, 0x41, 0x38, 0x20, 0x82, 0x08, 0x41, 0x04, 0x10, 0xC2,", "0x60, 0x20, 0x20, 0x10, 0x10, 0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33,", "0x04, 0x2C, 0x41, 0x22, 0x09, 0x10, 0x4D, 0x84, 0x28, 0x21, 0x41, 0x06, 0x10,", "[ 757, 12, 12, 12, 0, -11 ], # 0x58 'X' [ 775,", "0xB3, 
0x5C, 0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86,", "0xF1, 0x89, 0x8C, 0xC4, 0xC2, 0x63, 0x33, 0xAE, 0xE0, 0x0E, 0x65, 0x8B, 0x2F,", "12, 9, 0, -11 ], # 0x23 '#' [ 24, 9, 15, 9,", "0x82, 0x0C, 0x30, 0x31, 0x81, 0x8C, 0x0C, 0x40, 0x66, 0x07, 0x30, 0x31, 0x03,", "0x40, 0x80, 0x80, 0x40, 0x40, 0x60, 0x20, 0x20, 0x10, 0x10, 0x18, 0x08, 0x00,", "0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81,", "0x33, 0x62, 0x62, 0x42, 0x4D, 0xCE, 0x0F, 0x18, 0xD8, 0x7C, 0x3C, 0x3E, 0x1B,", "0xC0, 0xC0, 0xC4, 0x78, 0x01, 0x80, 0x40, 0x60, 0x20, 0xF1, 0x89, 0x8C, 0xC4,", "0x81, 0x04, 0x18, 0x20, 0xC1, 0x04, 0x08, 0x20, 0x41, 0x38, 0x20, 0x82, 0x08,", "0x42, 0x04, 0x08, 0x10, 0x20, 0x40, 0x72, 0x0E, 0x08, 0x61, 0x04, 0x30, 0x86,", "0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0, 0x0E, 0x19, 0x8C, 0x6C, 0x36,", "'9' [ 275, 4, 8, 4, 1, -7 ], # 0x3A ':' [", "343, 11, 11, 12, 0, -10 ], # 0x41 'A' [ 359, 11,", "0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E, 0x0C, 0x8C, 0x6C, 0x36, 0x1F, 0x0F, 0x07,", "0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00, 0x1F, 0x33, 0x60,", "898, 7, 8, 7, 0, -7 ], # 0x65 'e' [ 905, 11,", "0x6C 'l' [ 995, 13, 8, 13, 0, -7 ], # 0x6D 'm'", "0x1C, 0x14, 0x2C, 0x16, 0x4C, 0x26, 0x48, 0x26, 0x98, 0x27, 0x18, 0x27, 0x10,", "0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C, 0x1E, 0x00, 0x3F, 0xC1, 0x9C, 0x21, 0x8C,", "-11 ], # 0x35 '5' [ 216, 9, 13, 9, 1, -12 ],", "], # 0x2D '-' [ 124, 2, 2, 5, 0, -1 ], #", "], # 0x27 ''' [ 81, 6, 15, 6, 1, -11 ], #", "0x20, 0x40, 0x72, 0x0E, 0x08, 0x61, 0x04, 0x30, 0x86, 0x08, 0x61, 0x04, 0x30,", "5, 2, -11 ], # 0x60 '`' [ 853, 9, 8, 9, 0,", "-7 ], # 0x6E 'n' [ 1016, 9, 8, 9, 0, -7 ],", "12, 11, 2, -11 ], # 0x54 'T' [ 699, 12, 12, 13,", "0x20, 0x90, 0x50, 0x28, 0x18, 0x08, 0x08, 0x08, 0x18, 0x00, 0x3F, 0x42, 0x04,", "0x11, 0xB9, 0x8E, 0x77, 0x3B, 0x33, 0x62, 0x62, 0x42, 0x4D, 0xCE, 0x0F, 0x18,", "0x24 '$' [ 41, 14, 12, 15, 1, -11 ], # 0x25 '%'", "-11 ], # 0x43 'C' [ 394, 13, 12, 13, 0, -11 ],", "[ 807, 7, 15, 7, 0, -11 ], # 0x5B '[' [ 821,", "0, -7 ], # 0x67 'g' [ 943, 9, 12, 9, 0, -11", "], # 0x4D 'M' [ 575, 13, 12, 12, 0, -11 ], #", "0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26, 0x20, 0x99, 0x84, 0x3C,", "0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8, 0x34, 0x05, 0x01, 0x80, 0x40, 0x30,", "0x0C, 0xC1, 0x10, 0x19, 0x95, 0x43, 0x01, 0x80, 0xC0, 0xA0, 0x91, 0x8E, 0x70,", "0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40, 0x18, 0x06, 0x05, 0x81, 0x7F,", "], # 0x41 'A' [ 359, 11, 12, 11, 0, -11 ], #", "8, 0, -11 ], # 0x6B 'k' [ 989, 4, 12, 5, 1,", "-11 ], # 0x42 'B' [ 376, 12, 12, 11, 1, -11 ],", "0x00, 0x60, 0x0C, 0x43, 0x10, 0xC6, 0x62, 0x70, 0x7F, 0xE9, 0x8E, 0x31, 0x04,", "-11 ], # 0x4A 'J' [ 514, 13, 12, 12, 0, -11 ],", "0, 5, 0, 1 ], # 0x20 ' ' [ 0, 4, 12,", "10, 0, -11 ], # 0x46 'F' [ 450, 12, 12, 12, 1,", "8, 2, -11 ], # 0x3F '?' 
[ 323, 13, 12, 14, 1,", "16, 12, 15, 0, -11 ], # 0x4D 'M' [ 575, 13, 12,", "0x40, 0x20, 0x10, 0x08, 0x00, 0x56, 0xF0, 0xF0, 0x03, 0x02, 0x06, 0x04, 0x08,", "1, -11 ], # 0x6C 'l' [ 995, 13, 8, 13, 0, -7", "0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C, 0x9D, 0xEE, 0x62, 0xC4, 0x89, 0xA3, 0x47,", "0, 2 ], # 0x5F '_' [ 851, 3, 3, 5, 2, -11", "0x80, 0x00, 0x3C, 0x8C, 0x18, 0x30, 0xC3, 0x0C, 0x20, 0x40, 0x80, 0x06, 0x00,", "'\"' [ 9, 10, 12, 9, 0, -11 ], # 0x23 '#' [", "], # 0x5E '^' [ 849, 9, 1, 9, 0, 2 ], #", "7, 8, 7, 0, -7 ], # 0x72 'r' [ 1061, 7, 8,", "0xEC, 0x0C, 0x81, 0x18, 0x43, 0x08, 0x62, 0x0C, 0x81, 0x90, 0x14, 0x03, 0x00,", "0x20, 0x82, 0x08, 0x41, 0x04, 0x10, 0xC2, 0x08, 0x20, 0x8C, 0x00, 0x18, 0x18,", "], # 0x32 '2' [ 174, 9, 12, 9, 0, -11 ], #", "0xC1, 0x83, 0x04, 0x18, 0x30, 0x41, 0x87, 0x80, 0x0F, 0x81, 0x80, 0x80, 0xC0,", "12, 8, 0, -11 ], # 0x53 'S' [ 682, 11, 12, 11,", "12, 8, 0, -11 ], # 0x6B 'k' [ 989, 4, 12, 5,", "'l' [ 995, 13, 8, 13, 0, -7 ], # 0x6D 'm' [", "0x1F, 0x00, 0x01, 0x01, 0x81, 0x41, 0x61, 0x21, 0x11, 0x18, 0x88, 0xFF, 0x02,", "'x' [ 1109, 9, 12, 9, 0, -7 ], # 0x79 'y' [", "188, 9, 12, 9, 0, -11 ], # 0x34 '4' [ 202, 9,", "0x38, 0x07, 0x00, 0x60, 0x0C, 0x43, 0x10, 0xC6, 0x62, 0x70, 0x7F, 0xE9, 0x8E,", "'%' [ 62, 12, 12, 14, 1, -11 ], # 0x26 '&' [", "0x0C, 0x81, 0x18, 0x43, 0x08, 0x62, 0x0C, 0x81, 0x90, 0x14, 0x03, 0x00, 0x60,", "0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82, 0x04, 0x1C, 0x30, 0x40, 0x83, 0x04, 0x08,", "0x80, 0x30, 0x04, 0x0D, 0x83, 0x7F, 0xE0, 0x1C, 0x07, 0x0C, 0x0E, 0x0C, 0x14,", "0x11, 0x12, 0x22, 0x24, 0x40, 0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82, 0x20, 0x98,", "0x44, 0x48, 0x01, 0x83, 0x86, 0x1C, 0x0C, 0x03, 0x80, 0x30, 0x07, 0x00, 0x80,", "10, 1, -8 ], # 0x3C '<' [ 295, 9, 5, 12, 2,", "0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x00, 0x60,", "0x08, 0x20, 0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF, 0x80,", "0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x04, 0x60, 0x8F,", "12, 9, 0, -11 ], # 0x64 'd' [ 898, 7, 8, 7,", "[ 489, 7, 12, 6, 0, -11 ], # 0x49 'I' [ 500,", "13, 9, 1, -12 ], # 0x30 '0' [ 152, 6, 13, 9,", "0, -7 ], # 0x6E 'n' [ 1016, 9, 8, 9, 0, -7", "0x08, 0xC0, 0x60, 0xC1, 0xE0, 0x38, 0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F, 0x00,", "0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00, 0x10, 0x40, 0x82, 0x0C, 0x30, 0xC3, 0x0C,", "0x99, 0x8E ] FreeSerifItalic9pt7bGlyphs = [ [ 0, 0, 0, 5, 0, 1", "0x01, 0x83, 0x87, 0x07, 0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61, 0xB1, 0x88,", "3, 3, 5, 2, -11 ], # 0x60 '`' [ 853, 9, 8,", "'R' [ 667, 10, 12, 8, 0, -11 ], # 0x53 'S' [", "0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8, 0x3F, 0xF0, 0xC2,", "-11 ], # 0x56 'V' [ 734, 15, 12, 16, 2, -11 ],", "0x70, 0xC0, 0x80, 0x00, 0x3C, 0x8C, 0x18, 0x30, 0xC3, 0x0C, 0x20, 0x40, 0x80,", "3, 10, 1, -5 ] ] # 0x7E '~' FreeSerifItalic9pt7b = [ FreeSerifItalic9pt7bBitmaps,", "0x4D, 0x26, 0x81, 0xC0, 0x70, 0x1C, 0x13, 0x49, 0xA4, 0xDA, 0xC7, 0xC1, 0x00,", "0x23, 0x10, 0xC8, 0x34, 0x05, 0x01, 0x80, 0x40, 0x30, 0x0C, 0x03, 0x03, 0xE0,", "# 0x48 'H' [ 489, 7, 12, 6, 0, -11 ], # 0x49", "0x30, 0x7F, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C, 0x78,", "0x10, 0x30, 0x02, 0x00, 0x60, 0x0F, 0x80, 0x07, 0x91, 0x87, 0x30, 0x26, 0x02,", "-11 ], # 0x64 'd' [ 898, 7, 8, 7, 0, -7 ],", "0x0C, 0x06, 0x03, 0x03, 0x03, 0x1E, 0x00, 0x01, 0x83, 0x87, 0x07, 0x03, 0x03,", "-1, -12 ], # 0x66 'f' [ 929, 9, 12, 8, 0, -7", "0x04, 0x08, 0x08, 0x10, 0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E, 0x0C, 0x8C, 0x6C,", "# 0x28 '(' [ 93, 6, 15, 6, 0, -11 ], # 0x29", "0xB0, 
0xCC, 0xC3, 0xC0, 0x0E, 0x19, 0x8C, 0x6C, 0x36, 0x1B, 0x0D, 0x86, 0xE6,", "0x70, 0x38, 0x10, 0x10, 0x10, 0x37, 0x22, 0x24, 0x38, 0x78, 0x48, 0x4D, 0xC6,", "0x04, 0x02, 0x03, 0x03, 0xC0, 0x76, 0x50, 0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A,", "0x00, 0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0, 0xC4, 0x78, 0x01, 0x80, 0x40, 0x60,", "0x41, 0x87, 0x80, 0x0F, 0x81, 0x80, 0x80, 0xC0, 0x60, 0x20, 0x30, 0x18, 0x0C,", "0x0E, 0x0C, 0x8C, 0x6C, 0x36, 0x1F, 0x0F, 0x07, 0x87, 0xC3, 0x61, 0xB1, 0x88,", "0x63 'c' [ 884, 9, 12, 9, 0, -11 ], # 0x64 'd'", "0x00, 0x80, 0x30, 0x06, 0x00, 0x80, 0x7E, 0x00, 0x7C, 0xF3, 0x02, 0x30, 0x46,", "0x21, 0x08, 0x61, 0x8C, 0x30, 0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00, 0x10, 0x40,", "12, 2, -6 ], # 0x3D '=' [ 301, 9, 9, 10, 1,", "9, 10, 1, -8 ], # 0x3C '<' [ 295, 9, 5, 12,", "9, 1, -12 ], # 0x30 '0' [ 152, 6, 13, 9, 1,", "'4' [ 202, 9, 12, 9, 0, -11 ], # 0x35 '5' [", "-11 ], # 0x7C '|' [ 1146, 7, 16, 7, 0, -12 ],", "0x38, 0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F, 0x00, 0x01, 0x01, 0x81, 0x41, 0x61,", "0x14, 0x14, 0x1C, 0x14, 0x2C, 0x16, 0x4C, 0x26, 0x48, 0x26, 0x98, 0x27, 0x18,", "0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C, 0x9D, 0xEE, 0x62,", "0x07, 0x87, 0xC3, 0x61, 0xB1, 0x88, 0x83, 0x80, 0x04, 0x70, 0xC3, 0x08, 0x21,", "7, 8, 8, 1, -7 ], # 0x76 'v' [ 1089, 11, 8,", "0x24, 0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8, 0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C,", "[ 898, 7, 8, 7, 0, -7 ], # 0x65 'e' [ 905,", "0x01, 0x80, 0x40, 0x30, 0x0C, 0x03, 0x03, 0xE0, 0x3F, 0xE4, 0x19, 0x03, 0x00,", "0x33, 0x00, 0x00, 0xCC, 0x33, 0x00, 0x00, 0x44, 0x48, 0x01, 0x83, 0x86, 0x1C,", "0xC3, 0xC0, 0x0E, 0x19, 0x8C, 0x6C, 0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03,", "0x8C, 0x43, 0x31, 0x88, 0x62, 0x30, 0xF0, 0x60, 0x10, 0x04, 0x03, 0x80, 0x0F,", "[ 1054, 7, 8, 7, 0, -7 ], # 0x72 'r' [ 1061,", "0x0E, 0x65, 0x8B, 0x2F, 0x98, 0x31, 0x3C, 0x01, 0xE0, 0x40, 0x08, 0x02, 0x00,", "0x73 's' [ 1068, 5, 9, 4, 0, -8 ], # 0x74 't'", "0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8, 0x3F,", "'r' [ 1061, 7, 8, 6, 0, -7 ], # 0x73 's' [", "575, 13, 12, 12, 0, -11 ], # 0x4E 'N' [ 595, 11,", "0x88, 0xC3, 0xC0, 0x7F, 0x40, 0x80, 0x80, 0x40, 0x40, 0x60, 0x20, 0x20, 0x10,", "0x0C, 0x03, 0x03, 0xE0, 0x3F, 0xE4, 0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03,", "10, 12, 8, -1, -7 ], # 0x70 'p' [ 1040, 9, 12,", "0x08, 0x21, 0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02, 0x06,", "-11 ], # 0x52 'R' [ 667, 10, 12, 8, 0, -11 ],", "0x8C, 0x0C, 0x40, 0x66, 0x07, 0x30, 0x31, 0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F,", "0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x81, 0x06, 0x0C,", "0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x00,", "0x00, 0x1E, 0x19, 0xD8, 0xCC, 0xE1, 0xC3, 0x01, 0xE0, 0xBC, 0x82, 0x41, 0x31,", "'J' [ 514, 13, 12, 12, 0, -11 ], # 0x4B 'K' [", "0x86, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00,", "# 0x70 'p' [ 1040, 9, 12, 9, 0, -7 ], # 0x71", "0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x81, 0x06,", "1123, 8, 9, 7, 0, -7 ], # 0x7A 'z' [ 1132, 6,", "0x8F, 0x00, 0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82, 0x04, 0x1C, 0x30,", "11, 12, 12, 1, -11 ], # 0x4F 'O' [ 612, 11, 12,", "0x18, 0x30, 0xC3, 0x0C, 0x20, 0x40, 0x80, 0x06, 0x00, 0x0F, 0xC0, 0xC3, 0x0C,", "0x4B 'K' [ 534, 11, 12, 10, 0, -11 ], # 0x4C 'L'", "0x5E, 0x6A, 0x93, 0x08, 0x08, 0x04, 0x02, 0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10,", "10, 1, -5 ] ] # 0x7E '~' FreeSerifItalic9pt7b = [ FreeSerifItalic9pt7bBitmaps, FreeSerifItalic9pt7bGlyphs,", "0x00, 0x07, 
0x83, 0x18, 0xC1, 0xB0, 0x36, 0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0,", "0x46, 0x23, 0x20, 0x90, 0x50, 0x28, 0x18, 0x08, 0x08, 0x08, 0x18, 0x00, 0x3F,", "0x08, 0x08, 0x08, 0x18, 0x00, 0x3F, 0x42, 0x04, 0x08, 0x10, 0x20, 0x40, 0x72,", "9, 0, -7 ], # 0x6E 'n' [ 1016, 9, 8, 9, 0,", "], # 0x35 '5' [ 216, 9, 13, 9, 1, -12 ], #", "0x24, 0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11, 0x04, 0x83, 0x20, 0x1C, 0x1B,", "'' [ 284, 9, 9, 10, 1, -8 ], # 0x3C '<' [", "0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08, 0x04, 0x02, 0x03,", "0x47 'G' [ 468, 14, 12, 13, 0, -11 ], # 0x48 'H'", "0x62, 0x70, 0x7F, 0xE9, 0x8E, 0x31, 0x04, 0x01, 0x80, 0x30, 0x06, 0x00, 0x80,", "0x30, 0x31, 0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80,", "12, 10, 0, -11 ], # 0x45 'E' [ 432, 12, 12, 10,", "0x7C '|' [ 1146, 7, 16, 7, 0, -12 ], # 0x7D ']'", "12, 1, -7 ], # 0x77 'w' [ 1100, 9, 8, 8, -1,", "202, 9, 12, 9, 0, -11 ], # 0x35 '5' [ 216, 9,", "0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0, 0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C,", "2, 4, 4, 3, -11 ], # 0x27 ''' [ 81, 6, 15,", "0x61, 0x8C, 0x30, 0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00, 0x10, 0x40, 0x82, 0x0C,", "[ 842, 8, 7, 8, 0, -11 ], # 0x5E '^' [ 849,", "0x1C, 0x13, 0x49, 0xA4, 0xDA, 0xC7, 0xC1, 0x00, 0x80, 0x1C, 0x61, 0xCF, 0x0E,", "0x61, 0xB1, 0x88, 0xC3, 0xC0, 0x7F, 0x40, 0x80, 0x80, 0x40, 0x40, 0x60, 0x20,", "4, 3, -11 ], # 0x27 ''' [ 81, 6, 15, 6, 1,", "0x0C, 0x03, 0x80, 0x30, 0x07, 0x00, 0x80, 0xFF, 0x80, 0x00, 0x00, 0x0F, 0xF8,", "0x04, 0x82, 0x20, 0x98, 0x24, 0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11, 0x04,", "0x87, 0xE1, 0x80, 0x30, 0x04, 0x01, 0x80, 0x78, 0x00, 0x07, 0x83, 0x18, 0xC1,", "0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39, 0x91, 0xCC, 0x93, 0x3B,", "0x83, 0x04, 0x2C, 0x41, 0x22, 0x09, 0x10, 0x4D, 0x84, 0x28, 0x21, 0x41, 0x06,", "0x60, 0x33, 0x01, 0x0C, 0x18, 0x61, 0xE7, 0xC0, 0x3E, 0x01, 0x80, 0x20, 0x0C,", "0x00, 0x00, 0xC0, 0x83, 0x04, 0x08, 0x10, 0x60, 0x81, 0x02, 0x04, 0x70, 0x38,", "0x83, 0x18, 0xC1, 0xB0, 0x36, 0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31,", "0x00, 0x60, 0x14, 0x04, 0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B,", "0x0C, 0x30, 0xC3, 0x0C, 0x61, 0x84, 0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E, 0x6A,", "0x78, 0x1E, 0x18, 0x20, 0xC1, 0x83, 0x04, 0x18, 0x30, 0x41, 0x87, 0x80, 0x0F,", "0xE0, 0x40, 0x08, 0x02, 0x00, 0x40, 0x3E, 0x03, 0x00, 0x40, 0x08, 0x01, 0x00,", "0x20, 0xC1, 0x04, 0x08, 0x20, 0x41, 0x38, 0x20, 0x82, 0x08, 0x41, 0x04, 0x10,", "0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40, 0x18, 0x06, 0x05, 0x81, 0x7F, 0xE0,", "], # 0x6A 'j' [ 977, 8, 12, 8, 0, -11 ], #", "9, 0, -11 ], # 0x34 '4' [ 202, 9, 12, 9, 0,", "0x22, 0x24, 0x40, 0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82, 0x20, 0x98, 0x24, 0x7F,", "0x82, 0x0C, 0x30, 0xC3, 0x0C, 0x61, 0x84, 0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E,", "0x82, 0x08, 0x41, 0x04, 0x10, 0xC2, 0x08, 0x20, 0x8C, 0x00, 0x18, 0x18, 0x2C,", "1, -12 ], # 0x24 '$' [ 41, 14, 12, 15, 1, -11", "11, 12, 10, 0, -11 ], # 0x4C 'L' [ 551, 16, 12,", "0x66, 0x07, 0x30, 0x31, 0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08,", "0x68 'h' [ 957, 4, 12, 4, 1, -11 ], # 0x69 'i'", "12, 12, 2, -11 ], # 0x56 'V' [ 734, 15, 12, 16,", "0x07, 0x83, 0x18, 0xC1, 0xB0, 0x36, 0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8,", "0x64 'd' [ 898, 7, 8, 7, 0, -7 ], # 0x65 'e'", "0x18, 0x08, 0x08, 0x08, 0x18, 0x00, 0x3F, 0x42, 0x04, 0x08, 0x10, 0x20, 0x40,", "1100, 9, 8, 8, -1, -7 ], # 0x78 'x' [ 1109, 9,", "0x23, 0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F, 0x80, 0x3F, 0xC1, 0x8C, 0x21,", "105, 6, 8, 9, 3, -11 ], # 0x2A '#' [ 111, 9,", "0x48, 
0x26, 0x98, 0x27, 0x18, 0x27, 0x10, 0x42, 0x30, 0xF4, 0x7C, 0x38, 0x78,", "0x63, 0x21, 0x93, 0x8F, 0x00, 0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0, 0xC4, 0x78,", "[ 62, 12, 12, 14, 1, -11 ], # 0x26 '&' [ 80,", "-7 ], # 0x65 'e' [ 905, 11, 17, 8, -1, -12 ],", "-11 ], # 0x4C 'L' [ 551, 16, 12, 15, 0, -11 ],", "0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F, 0x80, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31,", "0x0F, 0xF8, 0x40, 0x20, 0x10, 0x08, 0x00, 0x56, 0xF0, 0xF0, 0x03, 0x02, 0x06,", "], # 0x39 '9' [ 275, 4, 8, 4, 1, -7 ], #", "'[' [ 821, 6, 12, 9, 2, -11 ], # 0x5C '\\' [", "0xC8, 0x23, 0x10, 0xC8, 0x34, 0x05, 0x01, 0x80, 0x40, 0x30, 0x0C, 0x03, 0x03,", "0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0, 0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC,", "0x01, 0x80, 0x40, 0x60, 0x20, 0xF1, 0x89, 0x8C, 0xC4, 0xC2, 0x63, 0x33, 0xAE,", "[ 137, 9, 13, 9, 1, -12 ], # 0x30 '0' [ 152,", "2, -11 ], # 0x5C '\\' [ 830, 6, 15, 7, 1, -11", "0x01, 0x80, 0x30, 0x06, 0x00, 0x80, 0x30, 0x06, 0x00, 0x80, 0x7E, 0x00, 0x7C,", "[ 216, 9, 13, 9, 1, -12 ], # 0x36 '6' [ 231,", "9, 12, 9, 0, -11 ], # 0x62 'b' [ 876, 8, 8,", "0x4E 'N' [ 595, 11, 12, 12, 1, -11 ], # 0x4F 'O'", "'>' [ 312, 7, 12, 8, 2, -11 ], # 0x3F '?' [", "0x04, 0x08, 0x1C, 0x00, 0x81, 0x04, 0x18, 0x20, 0xC1, 0x04, 0x08, 0x20, 0x41,", "0xA4, 0x65, 0x0C, 0xC1, 0x10, 0x19, 0x95, 0x43, 0x01, 0x80, 0xC0, 0xA0, 0x91,", "'=' [ 301, 9, 9, 10, 1, -8 ], # 0x3E '>' [", "0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0, 0x38, 0x0C, 0x06,", "-7 ], # 0x73 's' [ 1068, 5, 9, 4, 0, -8 ],", "152, 6, 13, 9, 1, -12 ], # 0x31 '1' [ 162, 8,", "0x3F '?' [ 323, 13, 12, 14, 1, -11 ], # 0x40 '@'", "[ 1074, 8, 8, 9, 1, -7 ], # 0x75 'u' [ 1082,", "4, 4, 3, -11 ], # 0x27 ''' [ 81, 6, 15, 6,", "5, 9, 4, 0, -8 ], # 0x74 't' [ 1074, 8, 8,", "884, 9, 12, 9, 0, -11 ], # 0x64 'd' [ 898, 7,", "0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06, 0x0C,", "0x08, 0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0, 0x38, 0x0C,", "0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82, 0x20, 0x98, 0x24, 0x7F, 0xC4, 0x82, 0x23,", "'K' [ 534, 11, 12, 10, 0, -11 ], # 0x4C 'L' [", "0x30, 0xA0, 0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26, 0x20,", "216, 9, 13, 9, 1, -12 ], # 0x36 '6' [ 231, 9,", "489, 7, 12, 6, 0, -11 ], # 0x49 'I' [ 500, 9,", "], # 0x3A ':' [ 279, 4, 10, 4, 1, -7 ], #", "0x42, 0x4D, 0xCE, 0x0F, 0x18, 0xD8, 0x7C, 0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B,", "# 0x7E '~' FreeSerifItalic9pt7b = [ FreeSerifItalic9pt7bBitmaps, FreeSerifItalic9pt7bGlyphs, 0x20, 0x7E, 22 ] #", "0x39, 0x5E, 0xCC, 0xCC, 0xCE, 0x66, 0x62, 0x22, 0x11, 0x11, 0xB9, 0x8E, 0x77,", "0, -11 ], # 0x49 'I' [ 500, 9, 12, 8, 0, -11", "0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02, 0x06, 0x0C, 0x08, 0x10,", "0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82, 0x04, 0x1C, 0x30, 0x40, 0x83, 0x04,", "0, -11 ], # 0x34 '4' [ 202, 9, 12, 9, 0, -11", "], # 0x6D 'm' [ 1008, 8, 8, 9, 0, -7 ], #", "0x1C, 0x1B, 0x99, 0x4D, 0x26, 0x81, 0xC0, 0x70, 0x1C, 0x13, 0x49, 0xA4, 0xDA,", "7, 8, 7, 0, -7 ], # 0x65 'e' [ 905, 11, 17,", "0x04, 0x01, 0x80, 0x30, 0x04, 0x0D, 0x83, 0x7F, 0xE0, 0x1C, 0x07, 0x0C, 0x0E,", "0x08, 0xC0, 0x38, 0x07, 0x00, 0x60, 0x0C, 0x43, 0x10, 0xC6, 0x62, 0x70, 0x7F,", "# 0x6B 'k' [ 989, 4, 12, 5, 1, -11 ], # 0x6C", "12, 2, -11 ], # 0x56 'V' [ 734, 15, 12, 16, 2,", "-7 ], # 0x75 'u' [ 1082, 7, 8, 8, 1, -7 ],", "0x93, 0x8F, 0x00, 0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0, 0xC4, 0x78, 0x01, 0x80,", "'m' [ 1008, 8, 8, 9, 0, -7 ], # 0x6E 'n' [", "0, -7 ], # 0x61 'a' [ 862, 9, 12, 9, 0, -11", "734, 15, 12, 16, 2, -11 ], # 0x57 'W' [ 757, 12,", 
"0x08, 0xC0, 0x8C, 0x10, 0xE3, 0x03, 0xC0, 0xF8, 0xEC, 0x0C, 0x81, 0x18, 0x43,", "0xC0, 0x3E, 0x01, 0x80, 0x20, 0x0C, 0x01, 0x80, 0x30, 0x04, 0x01, 0x80, 0x30,", "], # 0x4B 'K' [ 534, 11, 12, 10, 0, -11 ], #", "0xC4, 0x89, 0xA3, 0x47, 0x0C, 0x10, 0xE2, 0x2C, 0x44, 0xD8, 0x9D, 0x23, 0xA4,", "0x00, 0x3F, 0x42, 0x04, 0x08, 0x10, 0x20, 0x40, 0x72, 0x0E, 0x08, 0x61, 0x04,", "11, 12, 11, 0, -11 ], # 0x42 'B' [ 376, 12, 12,", "0x80, 0x0F, 0x81, 0x80, 0x80, 0xC0, 0x60, 0x20, 0x30, 0x18, 0x0C, 0x04, 0x36,", "[ 1160, 8, 3, 10, 1, -5 ] ] # 0x7E '~' FreeSerifItalic9pt7b", "14, 1, -11 ], # 0x26 '&' [ 80, 2, 4, 4, 3,", "# 0x44 'D' [ 414, 12, 12, 10, 0, -11 ], # 0x45", "'H' [ 489, 7, 12, 6, 0, -11 ], # 0x49 'I' [", "12, 12, 14, 1, -11 ], # 0x26 '&' [ 80, 2, 4,", "0x63, 0x08, 0xC0, 0x38, 0x07, 0x00, 0x60, 0x0C, 0x43, 0x10, 0xC6, 0x62, 0x70,", "0x00, 0x60, 0x06, 0x00, 0xB0, 0x13, 0x02, 0x18, 0x61, 0x8F, 0x3E, 0xF9, 0xC8,", "0x4C, 0x26, 0x48, 0x26, 0x98, 0x27, 0x18, 0x27, 0x10, 0x42, 0x30, 0xF4, 0x7C,", "12, 4, 1, -11 ], # 0x69 'i' [ 963, 7, 16, 5,", "0x31, 0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19,", "0x02, 0x06, 0x04, 0x08, 0x08, 0x10, 0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E, 0x0C,", "# 0x5B '[' [ 821, 6, 12, 9, 2, -11 ], # 0x5C", "125, 8, 12, 5, 0, -11 ], # 0x2F '/' [ 137, 9,", "-11 ], # 0x34 '4' [ 202, 9, 12, 9, 0, -11 ],", "9, 3, -11 ], # 0x2A '#' [ 111, 9, 9, 12, 1,", "0xB8, 0xE0, 0x30, 0x00, 0xE2, 0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0,", "'8' [ 260, 9, 13, 9, 0, -12 ], # 0x39 '9' [", "0x10, 0xE3, 0x03, 0xC0, 0xF8, 0xEC, 0x0C, 0x81, 0x18, 0x43, 0x08, 0x62, 0x0C,", "0x00, 0x80, 0x7E, 0x00, 0x7C, 0xF3, 0x02, 0x30, 0x46, 0x04, 0x60, 0x46, 0x04,", "2, 2, 5, 0, -1 ], # 0x2E '.' [ 125, 8, 12,", "0x48 'H' [ 489, 7, 12, 6, 0, -11 ], # 0x49 'I'", "12, 8, 0, -11 ], # 0x4A 'J' [ 514, 13, 12, 12,", "0x3C, 0x03, 0x80, 0x6C, 0x06, 0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66, 0x24, 0x24,", "0x33, 0x3A, 0xEE, 0x38, 0x08, 0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63,", "-11 ], # 0x44 'D' [ 414, 12, 12, 10, 0, -11 ],", "2, -11 ], # 0x57 'W' [ 757, 12, 12, 12, 0, -11", "0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8, 0x80, 0x1F, 0x98, 0x98, 0x4C,", "0x10, 0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80,", "0x02, 0x04, 0x60, 0x8F, 0xF8, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81,", "# 0x5A 'Z' [ 807, 7, 15, 7, 0, -11 ], # 0x5B", "12, 9, 0, -11 ], # 0x34 '4' [ 202, 9, 12, 9,", "0x52 'R' [ 667, 10, 12, 8, 0, -11 ], # 0x53 'S'", "15, 0, -11 ], # 0x4D 'M' [ 575, 13, 12, 12, 0,", "0x4A 'J' [ 514, 13, 12, 12, 0, -11 ], # 0x4B 'K'", "0x81, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C, 0x78, 0x1E, 0x18,", "0x30, 0x07, 0x00, 0x80, 0xFF, 0x80, 0x00, 0x00, 0x0F, 0xF8, 0xC0, 0x1C, 0x03,", "0x83, 0xFF, 0x80, 0xD8, 0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A,", "0x83, 0x20, 0x1C, 0x1B, 0x99, 0x4D, 0x26, 0x81, 0xC0, 0x70, 0x1C, 0x13, 0x49,", "12, 5, 1, -11 ], # 0x6C 'l' [ 995, 13, 8, 13,", "0x7D ']' [ 1160, 8, 3, 10, 1, -5 ] ] # 0x7E", "11, 12, 12, 2, -11 ], # 0x56 'V' [ 734, 15, 12,", "0x86, 0x11, 0x8C, 0x43, 0x38, 0x86, 0xB2, 0x0D, 0x24, 0x1C, 0x50, 0x38, 0xA0,", "# 0x63 'c' [ 884, 9, 12, 9, 0, -11 ], # 0x64", "0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C, 0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0,", "0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x04, 0x60,", "0x70 'p' [ 1040, 9, 12, 9, 0, -7 ], # 0x71 'q'", "0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0, 0x38, 0x07, 0x00, 0x60, 0x0C, 0x43,", "], # 0x6B 'k' [ 989, 4, 12, 5, 1, -11 ], #", "0x00, 0x7C, 0xF3, 
0x02, 0x30, 0x46, 0x04, 0x60, 0x46, 0x04, 0x40, 0x8C, 0x08,", "394, 13, 12, 13, 0, -11 ], # 0x44 'D' [ 414, 12,", "0x0C, 0x30, 0x41, 0x02, 0x00, 0x10, 0x40, 0x82, 0x0C, 0x30, 0xC3, 0x0C, 0x61,", "650, 11, 12, 11, 0, -11 ], # 0x52 'R' [ 667, 10,", "0x43, 0x31, 0x88, 0x62, 0x30, 0xF0, 0x60, 0x10, 0x04, 0x03, 0x80, 0x0F, 0x18,", "0x2D '-' [ 124, 2, 2, 5, 0, -1 ], # 0x2E '.'", "2 ], # 0x5F '_' [ 851, 3, 3, 5, 2, -11 ],", "0x02, 0x60, 0x0C, 0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0,", "1, -11 ], # 0x37 '7' [ 245, 9, 13, 9, 1, -12", "0x10, 0x42, 0x30, 0xF4, 0x7C, 0x38, 0x78, 0x60, 0x83, 0x04, 0x2C, 0x41, 0x22,", "-11 ], # 0x68 'h' [ 957, 4, 12, 4, 1, -11 ],", "62, 12, 12, 14, 1, -11 ], # 0x26 '&' [ 80, 2,", "0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03, 0x06, 0x0C, 0x00, 0x33, 0x00,", "[ 323, 13, 12, 14, 1, -11 ], # 0x40 '@' [ 343,", "0x03, 0x03, 0xC0, 0x76, 0x50, 0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8,", "16, 2, -11 ], # 0x57 'W' [ 757, 12, 12, 12, 0,", "0x00, 0x60, 0x08, 0x00, 0xFB, 0xCE, 0x43, 0x0C, 0x86, 0x11, 0x8C, 0x43, 0x38,", "0x1F, 0x80, 0x01, 0x00, 0x60, 0x14, 0x04, 0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04,", "0x10, 0x20, 0x40, 0x72, 0x0E, 0x08, 0x61, 0x04, 0x30, 0x86, 0x08, 0x61, 0x04,", "13, 12, 14, 1, -11 ], # 0x40 '@' [ 343, 11, 11,", "12, 15, 0, -11 ], # 0x4D 'M' [ 575, 13, 12, 12,", "0x00, 0x00, 0xCC, 0x33, 0x00, 0x00, 0x44, 0x48, 0x01, 0x83, 0x86, 0x1C, 0x0C,", "0x82, 0x20, 0x98, 0x24, 0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11, 0x04, 0x83,", "0xA4, 0xDA, 0xC7, 0xC1, 0x00, 0x80, 0x1C, 0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0,", "0x43, 0x0C, 0x86, 0x11, 0x8C, 0x43, 0x38, 0x86, 0xB2, 0x0D, 0x24, 0x1C, 0x50,", "1 ], # 0x20 ' ' [ 0, 4, 12, 6, 1, -11", "9, 13, 9, 1, -12 ], # 0x30 '0' [ 152, 6, 13,", "'v' [ 1089, 11, 8, 12, 1, -7 ], # 0x77 'w' [", "'B' [ 376, 12, 12, 11, 1, -11 ], # 0x43 'C' [", "], # 0x55 'U' [ 717, 11, 12, 12, 2, -11 ], #", "0x03, 0x80, 0x30, 0x07, 0x00, 0x80, 0xFF, 0x80, 0x00, 0x00, 0x0F, 0xF8, 0xC0,", "# 0x72 'r' [ 1061, 7, 8, 6, 0, -7 ], # 0x73", "0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10, 0x08, 0x00, 0x56, 0xF0, 0xF0, 0x03, 0x02,", "1, -12 ], # 0x30 '0' [ 152, 6, 13, 9, 1, -12", "-11 ], # 0x6C 'l' [ 995, 13, 8, 13, 0, -7 ],", "0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39, 0x91, 0xCC, 0x93, 0x3B, 0x0E, 0x00, 0x1F,", "7, 0, -7 ], # 0x63 'c' [ 884, 9, 12, 9, 0,", "# 0x64 'd' [ 898, 7, 8, 7, 0, -7 ], # 0x65", "853, 9, 8, 9, 0, -7 ], # 0x61 'a' [ 862, 9,", "0x70, 0x88, 0x46, 0x23, 0x20, 0x90, 0x50, 0x28, 0x18, 0x08, 0x08, 0x08, 0x18,", "'[' [ 1144, 1, 12, 5, 2, -11 ], # 0x7C '|' [", "0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02, 0x06, 0x0C, 0x08, 0x10, 0x20,", "0xC1, 0x10, 0x19, 0x95, 0x43, 0x01, 0x80, 0xC0, 0xA0, 0x91, 0x8E, 0x70, 0x88,", "11, 2, -11 ], # 0x59 'Y' [ 790, 11, 12, 10, 0,", "0xE2, 0x2C, 0x44, 0xD8, 0x9D, 0x23, 0xA4, 0x65, 0x0C, 0xC1, 0x10, 0x19, 0x95,", "], # 0x47 'G' [ 468, 14, 12, 13, 0, -11 ], #", "0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0, 0x63, 0x82, 0x0C, 0x30,", "0x2C ',' [ 123, 4, 1, 6, 1, -3 ], # 0x2D '-'", "0x8C, 0x6C, 0x36, 0x1F, 0x0F, 0x07, 0x87, 0xC3, 0x61, 0xB1, 0x88, 0x83, 0x80,", "1, -7 ], # 0x77 'w' [ 1100, 9, 8, 8, -1, -7", "[ 995, 13, 8, 13, 0, -7 ], # 0x6D 'm' [ 1008,", "0x44 'D' [ 414, 12, 12, 10, 0, -11 ], # 0x45 'E'", "0xC1, 0x98, 0x36, 0x07, 0xC0, 0xF0, 0x1E, 0x06, 0xC0, 0xD8, 0x31, 0x04, 0x13,", "0x00, 0x3C, 0x8C, 0x18, 0x30, 0xC3, 0x0C, 0x20, 0x40, 0x80, 0x06, 0x00, 0x0F,", "0x4D, 0xCE, 0x0F, 0x18, 0xD8, 0x7C, 0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87,", "5, 4, 6, 3, -11 ], # 0x22 '\"' [ 9, 10, 12,", "0x03, 0x80, 0x60, 
0x18, 0x0C, 0x06, 0x03, 0x03, 0x03, 0x1E, 0x00, 0x01, 0x83,", "9, 0, -11 ], # 0x68 'h' [ 957, 4, 12, 4, 1,", "0x0C, 0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0, 0x3E, 0x3C,", "0x3A ':' [ 279, 4, 10, 4, 1, -7 ], # 0x3B ''", "849, 9, 1, 9, 0, 2 ], # 0x5F '_' [ 851, 3,", "0x61, 0x21, 0x11, 0x18, 0x88, 0xFF, 0x02, 0x03, 0x01, 0x00, 0x0F, 0x84, 0x04,", "0x06, 0x03, 0x01, 0x01, 0x1F, 0x00, 0x01, 0x01, 0x81, 0x41, 0x61, 0x21, 0x11,", "0, -7 ], # 0x6F 'o' [ 1025, 10, 12, 8, -1, -7", "0x90, 0x14, 0x03, 0x00, 0x60, 0x08, 0x00, 0xFB, 0xCE, 0x43, 0x0C, 0x86, 0x11,", "1, -7 ], # 0x76 'v' [ 1089, 11, 8, 12, 1, -7", "9, 15, 9, 1, -12 ], # 0x24 '$' [ 41, 14, 12,", "# 0x3E '>' [ 312, 7, 12, 8, 2, -11 ], # 0x3F", "0, -7 ], # 0x71 'q' [ 1054, 7, 8, 7, 0, -7", "0x2F '/' [ 137, 9, 13, 9, 1, -12 ], # 0x30 '0'", "'U' [ 717, 11, 12, 12, 2, -11 ], # 0x56 'V' [", "0, -11 ], # 0x44 'D' [ 414, 12, 12, 10, 0, -11", "0x6C, 0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03, 0x06, 0x0C, 0x00, 0x33,", "0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60, 0x06, 0x00, 0xB0, 0x13, 0x02, 0x18, 0x61,", "757, 12, 12, 12, 0, -11 ], # 0x58 'X' [ 775, 10,", "0xC8, 0x34, 0x05, 0x01, 0x80, 0x40, 0x30, 0x0C, 0x03, 0x03, 0xE0, 0x3F, 0xE4,", "0x02, 0x06, 0x0C, 0x08, 0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1,", "0xE3, 0x87, 0xCE, 0xFA, 0x08, 0x21, 0x08, 0x61, 0x8C, 0x30, 0xC3, 0x0C, 0x30,", "0x04, 0x1C, 0x30, 0x40, 0x83, 0x04, 0x08, 0x20, 0x60, 0x99, 0x8E ] FreeSerifItalic9pt7bGlyphs", "0x87, 0x07, 0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61, 0xB1, 0x88, 0xC3, 0xC0,", "-8 ], # 0x74 't' [ 1074, 8, 8, 9, 1, -7 ],", "0x2C, 0x41, 0x22, 0x09, 0x10, 0x4D, 0x84, 0x28, 0x21, 0x41, 0x06, 0x10, 0x21,", "0x0F, 0x80, 0x07, 0x91, 0x87, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC1, 0xFC,", "0x43, 0x01, 0x80, 0xC0, 0xA0, 0x91, 0x8E, 0x70, 0x88, 0x46, 0x23, 0x20, 0x90,", "0x06, 0x10, 0x21, 0xE1, 0x00, 0x07, 0x83, 0x18, 0xC1, 0xB0, 0x36, 0x07, 0xC0,", "0x04, 0x01, 0x00, 0xC0, 0x00, 0x1E, 0x19, 0xD8, 0xCC, 0xE1, 0xC3, 0x01, 0xE0,", "0, -11 ], # 0x35 '5' [ 216, 9, 13, 9, 1, -12", "0x98, 0x31, 0x84, 0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0, 0x38,", "0x03, 0x80, 0x70, 0x18, 0x38, 0x70, 0xC0, 0x80, 0x00, 0x3C, 0x8C, 0x18, 0x30,", "9, 7, 0, -7 ], # 0x7A 'z' [ 1132, 6, 15, 7,", "0x62, 0x30, 0xF0, 0x60, 0x10, 0x04, 0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C, 0x2C,", "0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0, 0x38, 0x0C, 0x06, 0x03, 0x01,", "0x21, 0x8C, 0x31, 0x86, 0x31, 0x87, 0xE1, 0x80, 0x30, 0x04, 0x01, 0x80, 0x78,", "0x80, 0x40, 0x30, 0x0C, 0x03, 0x03, 0xE0, 0x3F, 0xE4, 0x19, 0x03, 0x00, 0xC0,", "0x83, 0xF0, 0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81,", "0x09, 0x10, 0x4D, 0x84, 0x28, 0x21, 0x41, 0x06, 0x10, 0x21, 0xE1, 0x00, 0x07,", "], # 0x67 'g' [ 943, 9, 12, 9, 0, -11 ], #", "0x30, 0x00, 0xE2, 0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0, 0x83, 0x04,", "0x57 'W' [ 757, 12, 12, 12, 0, -11 ], # 0x58 'X'", "0xB0, 0x36, 0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C, 0x1E, 0x00,", "'u' [ 1082, 7, 8, 8, 1, -7 ], # 0x76 'v' [", "301, 9, 9, 10, 1, -8 ], # 0x3E '>' [ 312, 7,", "8, 12, 5, 0, -11 ], # 0x2F '/' [ 137, 9, 13,", "0x21, 0x93, 0x8F, 0x00, 0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0, 0xC4, 0x78, 0x01,", "0x10, 0xE3, 0x87, 0xCE, 0xFA, 0x08, 0x21, 0x08, 0x61, 0x8C, 0x30, 0xC3, 0x0C,", "0x23, 0xA4, 0x65, 0x0C, 0xC1, 0x10, 0x19, 0x95, 0x43, 0x01, 0x80, 0xC0, 0xA0,", "0x06, 0x04, 0x08, 0x08, 0x10, 0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E, 0x0C, 0x8C,", "0x26, 0x48, 0x26, 0x98, 0x27, 0x18, 0x27, 0x10, 0x42, 0x30, 0xF4, 0x7C, 0x38,", 
"0x81, 0x02, 0x04, 0x10, 0x20, 0x40, 0x82, 0x04, 0x08, 0x1C, 0x00, 0x81, 0x04,", "11, 15, 12, 1, -11 ], # 0x51 'Q' [ 650, 11, 12,", "0x1E, 0x19, 0xD8, 0xCC, 0xE1, 0xC3, 0x01, 0xE0, 0xBC, 0x82, 0x41, 0x31, 0x0F,", "0x32, 0x26, 0x64, 0x4C, 0xDE, 0x77, 0x39, 0x5E, 0xCC, 0xCC, 0xCE, 0x66, 0x62,", "0x0C, 0x86, 0x11, 0x8C, 0x43, 0x38, 0x86, 0xB2, 0x0D, 0x24, 0x1C, 0x50, 0x38,", "0x18, 0x88, 0xFF, 0x02, 0x03, 0x01, 0x00, 0x0F, 0x84, 0x04, 0x03, 0x80, 0x60,", "0xC0, 0xF8, 0xEC, 0x0C, 0x81, 0x18, 0x43, 0x08, 0x62, 0x0C, 0x81, 0x90, 0x14,", "0x67, 0x83, 0x03, 0x02, 0x06, 0x0C, 0x08, 0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08,", "0x13, 0x01, 0x80, 0x70, 0xB7, 0xE0, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C,", "1, -12 ], # 0x38 '8' [ 260, 9, 13, 9, 0, -12", "0xF0, 0xF0, 0x03, 0x02, 0x06, 0x04, 0x08, 0x08, 0x10, 0x30, 0x20, 0x60, 0x40,", "2, 4, 5, 0, -1 ], # 0x2C ',' [ 123, 4, 1,", "0xCF, 0x0E, 0x28, 0x30, 0xA0, 0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05,", "929, 9, 12, 8, 0, -7 ], # 0x67 'g' [ 943, 9,", "5, 0, 1 ], # 0x20 ' ' [ 0, 4, 12, 6,", "0x80, 0x30, 0x06, 0x00, 0x80, 0x7E, 0x00, 0x7C, 0xF3, 0x02, 0x30, 0x46, 0x04,", "0x71 'q' [ 1054, 7, 8, 7, 0, -7 ], # 0x72 'r'", "0xC0, 0x38, 0x07, 0x00, 0x60, 0x0C, 0x43, 0x10, 0xC6, 0x62, 0x70, 0x7F, 0xE9,", "], # 0x3F '?' [ 323, 13, 12, 14, 1, -11 ], #", "], # 0x75 'u' [ 1082, 7, 8, 8, 1, -7 ], #", "0x03, 0xC0, 0x76, 0x50, 0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0,", "0x3D '=' [ 301, 9, 9, 10, 1, -8 ], # 0x3E '>'", "7, 0, -12 ], # 0x7D ']' [ 1160, 8, 3, 10, 1,", "0x04, 0x61, 0x86, 0x30, 0xC4, 0x19, 0x86, 0x7F, 0x80, 0x07, 0x91, 0x86, 0x30,", "# 0x62 'b' [ 876, 8, 8, 7, 0, -7 ], # 0x63", "[ 989, 4, 12, 5, 1, -11 ], # 0x6C 'l' [ 995,", "0x38, 0x20, 0x82, 0x08, 0x41, 0x04, 0x10, 0xC2, 0x08, 0x20, 0x8C, 0x00, 0x18,", "0x7F, 0xE0, 0x0E, 0x10, 0x20, 0x81, 0x02, 0x04, 0x10, 0x20, 0x40, 0x82, 0x04,", "0x0E, 0x10, 0x20, 0x81, 0x02, 0x04, 0x10, 0x20, 0x40, 0x82, 0x04, 0x08, 0x1C,", "0x56 'V' [ 734, 15, 12, 16, 2, -11 ], # 0x57 'W'", "0x24, 0x11, 0x04, 0x83, 0x20, 0x1C, 0x1B, 0x99, 0x4D, 0x26, 0x81, 0xC0, 0x70,", "717, 11, 12, 12, 2, -11 ], # 0x56 'V' [ 734, 15,", "0x4D 'M' [ 575, 13, 12, 12, 0, -11 ], # 0x4E 'N'", "0x78 'x' [ 1109, 9, 12, 9, 0, -7 ], # 0x79 'y'", "0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3F, 0x04, 0xC1, 0x98, 0x31, 0x84,", "], # 0x50 'P' [ 629, 11, 15, 12, 1, -11 ], #", "0xC4, 0x19, 0x86, 0x7F, 0x80, 0x07, 0x91, 0x86, 0x30, 0x26, 0x02, 0x60, 0x0C,", "0x89, 0x8C, 0xC4, 0xC2, 0x63, 0x33, 0xAE, 0xE0, 0x0E, 0x65, 0x8B, 0x2F, 0x98,", "']' [ 842, 8, 7, 8, 0, -11 ], # 0x5E '^' [", "1, -12 ], # 0x36 '6' [ 231, 9, 12, 9, 1, -11", "0xDA, 0xC7, 0xC1, 0x00, 0x80, 0x1C, 0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0, 0xC5,", "# 0x32 '2' [ 174, 9, 12, 9, 0, -11 ], # 0x33", "[ 80, 2, 4, 4, 3, -11 ], # 0x27 ''' [ 81,", "9, 13, 9, 0, -12 ], # 0x39 '9' [ 275, 4, 8,", "3, -11 ], # 0x22 '\"' [ 9, 10, 12, 9, 0, -11", "-8 ], # 0x3C '<' [ 295, 9, 5, 12, 2, -6 ],", "8, 8, -1, -7 ], # 0x78 'x' [ 1109, 9, 12, 9,", "231, 9, 12, 9, 1, -11 ], # 0x37 '7' [ 245, 9,", "0x19, 0xD8, 0xCC, 0xE1, 0xC3, 0x01, 0xE0, 0xBC, 0x82, 0x41, 0x31, 0x0F, 0x00,", "0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08, 0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3,", "1, -11 ], # 0x69 'i' [ 963, 7, 16, 5, -1, -11", "0x04, 0x18, 0x30, 0x41, 0x87, 0x80, 0x0F, 0x81, 0x80, 0x80, 0xC0, 0x60, 0x20,", "# 0x39 '9' [ 275, 4, 8, 4, 1, -7 ], # 0x3A", "0x06, 0x03, 0x03, 0x03, 0x1E, 0x00, 0x01, 0x83, 0x87, 0x07, 0x03, 0x03, 0x73,", "# 0x43 'C' [ 394, 13, 12, 13, 0, -11 ], # 0x44", "0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00, 0x1F, 
0x33, 0x60, 0xC0, 0xC0,", "[ 929, 9, 12, 8, 0, -7 ], # 0x67 'g' [ 943,", "0xC3, 0x61, 0xB1, 0x88, 0x83, 0x80, 0x04, 0x70, 0xC3, 0x08, 0x21, 0x86, 0x10,", "6, 3, -11 ], # 0x22 '\"' [ 9, 10, 12, 9, 0,", "'O' [ 612, 11, 12, 10, 0, -11 ], # 0x50 'P' [", "8, 7, 0, -7 ], # 0x63 'c' [ 884, 9, 12, 9,", "9, 0, -11 ], # 0x23 '#' [ 24, 9, 15, 9, 1,", "0x30, 0xC3, 0x0C, 0x61, 0x84, 0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E, 0x6A, 0x93,", "0x31, 0x81, 0x8C, 0x0C, 0x40, 0x66, 0x07, 0x30, 0x31, 0x03, 0x18, 0x71, 0xFE,", "9, 0, -11 ], # 0x33 '3' [ 188, 9, 12, 9, 0,", "'f' [ 929, 9, 12, 8, 0, -7 ], # 0x67 'g' [", "0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3F, 0x04, 0xC1, 0x98, 0x31, 0x84, 0x31,", "0x04, 0x02, 0x03, 0x39, 0x6C, 0xC6, 0x46, 0x63, 0x21, 0x11, 0xB8, 0xE0, 0x30,", "], # 0x42 'B' [ 376, 12, 12, 11, 1, -11 ], #", "0xF4, 0x7C, 0x38, 0x78, 0x60, 0x83, 0x04, 0x2C, 0x41, 0x22, 0x09, 0x10, 0x4D,", "0xE0, 0x3F, 0xE4, 0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40, 0x18,", "80, 2, 4, 4, 3, -11 ], # 0x27 ''' [ 81, 6,", "11, 1, -11 ], # 0x43 'C' [ 394, 13, 12, 13, 0,", "10, 0, -11 ], # 0x4C 'L' [ 551, 16, 12, 15, 0,", "[ 853, 9, 8, 9, 0, -7 ], # 0x61 'a' [ 862,", "0x40, 0x66, 0x07, 0x30, 0x31, 0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2,", "0x10, 0x08, 0x00, 0x56, 0xF0, 0xF0, 0x03, 0x02, 0x06, 0x04, 0x08, 0x08, 0x10,", "0x66, 0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3, 0x87, 0xCE, 0xFA, 0x08, 0x21, 0x08,", "0x00, 0x0F, 0xF8, 0xC0, 0x1C, 0x03, 0x80, 0x70, 0x18, 0x38, 0x70, 0xC0, 0x80,", "0x01, 0xEF, 0x66, 0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3, 0x87, 0xCE, 0xFA, 0x08,", "], # 0x6F 'o' [ 1025, 10, 12, 8, -1, -7 ], #", "0x33, 0x38, 0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0, 0x76, 0x50, 0xC1, 0x06, 0x08,", "8, 7, 0, -7 ], # 0x65 'e' [ 905, 11, 17, 8,", "0, -11 ], # 0x50 'P' [ 629, 11, 15, 12, 1, -11", "# 0x2C ',' [ 123, 4, 1, 6, 1, -3 ], # 0x2D", "0, -11 ], # 0x58 'X' [ 775, 10, 12, 11, 2, -11", "0x04, 0x10, 0xC2, 0x08, 0x20, 0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24, 0x46, 0x42,", "0x20, 0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8,", "0x00, 0x30, 0xCA, 0x5E, 0x6A, 0x93, 0x08, 0x08, 0x04, 0x02, 0x01, 0x0F, 0xF8,", "0x87, 0xCE, 0xFA, 0x08, 0x21, 0x08, 0x61, 0x8C, 0x30, 0xC3, 0x0C, 0x30, 0x41,", "11, 8, 12, 1, -7 ], # 0x77 'w' [ 1100, 9, 8,", "0xF8, 0xEC, 0x0C, 0x81, 0x18, 0x43, 0x08, 0x62, 0x0C, 0x81, 0x90, 0x14, 0x03,", "13, 9, 1, -12 ], # 0x36 '6' [ 231, 9, 12, 9,", "# 0x4F 'O' [ 612, 11, 12, 10, 0, -11 ], # 0x50", "0x07, 0x00, 0x60, 0x0C, 0x43, 0x10, 0xC6, 0x62, 0x70, 0x7F, 0xE9, 0x8E, 0x31,", "0x29 ')' [ 105, 6, 8, 9, 3, -11 ], # 0x2A '#'", "0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x00, 0x60, 0x0F, 0x80, 0x07, 0x91, 0x87,", "0xC3, 0x01, 0xE0, 0xBC, 0x82, 0x41, 0x31, 0x0F, 0x00, 0x38, 0x08, 0x04, 0x02,", "# 0x2E '.' 
[ 125, 8, 12, 5, 0, -11 ], # 0x2F", "0x73, 0x32, 0x26, 0x64, 0x4C, 0xDE, 0x77, 0x39, 0x5E, 0xCC, 0xCC, 0xCE, 0x66,", "0x20, 0xC1, 0x82, 0x04, 0x1C, 0x30, 0x40, 0x83, 0x04, 0x08, 0x20, 0x60, 0x99,", "0x7F, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C, 0x78, 0x1E,", "0x04, 0x0D, 0x83, 0x7F, 0xE0, 0x1C, 0x07, 0x0C, 0x0E, 0x0C, 0x14, 0x14, 0x1C,", "0x60, 0xC1, 0xE0, 0x38, 0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F, 0x00, 0x01, 0x01,", "0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31, 0x8C, 0x43, 0x31, 0x88, 0x62, 0x30, 0xF0,", "0x3B, 0x33, 0x62, 0x62, 0x42, 0x4D, 0xCE, 0x0F, 0x18, 0xD8, 0x7C, 0x3C, 0x3E,", "'Y' [ 790, 11, 12, 10, 0, -11 ], # 0x5A 'Z' [", "0x55 'U' [ 717, 11, 12, 12, 2, -11 ], # 0x56 'V'", "0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E, 0x6A, 0x93, 0x08, 0x08, 0x04, 0x02, 0x01,", "0x62, 0xC4, 0x89, 0xA3, 0x47, 0x0C, 0x10, 0xE2, 0x2C, 0x44, 0xD8, 0x9D, 0x23,", "0x66, 0x33, 0xB0, 0xE0, 0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0, 0x0E,", "], # 0x49 'I' [ 500, 9, 12, 8, 0, -11 ], #", "# 0x3C '<' [ 295, 9, 5, 12, 2, -6 ], # 0x3D", "0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F, 0x00, 0x01, 0x01, 0x81, 0x41, 0x61, 0x21,", "0x10, 0x37, 0x22, 0x24, 0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73, 0x32, 0x26, 0x64,", "0x45 'E' [ 432, 12, 12, 10, 0, -11 ], # 0x46 'F'", "0xCC, 0xCE, 0x66, 0x62, 0x22, 0x11, 0x11, 0xB9, 0x8E, 0x77, 0x3B, 0x33, 0x62,", "612, 11, 12, 10, 0, -11 ], # 0x50 'P' [ 629, 11,", "0x7B '[' [ 1144, 1, 12, 5, 2, -11 ], # 0x7C '|'", "0x01, 0x80, 0x30, 0x04, 0x01, 0x80, 0x30, 0x04, 0x0D, 0x83, 0x7F, 0xE0, 0x1C,", "0x43, 0x08, 0x62, 0x0C, 0x81, 0x90, 0x14, 0x03, 0x00, 0x60, 0x08, 0x00, 0xFB,", "0x10, 0x21, 0xE1, 0x00, 0x07, 0x83, 0x18, 0xC1, 0xB0, 0x36, 0x07, 0xC0, 0xF0,", "# 0x77 'w' [ 1100, 9, 8, 8, -1, -7 ], # 0x78", "-8 ], # 0x3E '>' [ 312, 7, 12, 8, 2, -11 ],", "0x02, 0x60, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0,", "7, 15, 7, 0, -11 ], # 0x5B '[' [ 821, 6, 12,", "0x08, 0x08, 0x18, 0x00, 0x3F, 0x42, 0x04, 0x08, 0x10, 0x20, 0x40, 0x72, 0x0E,", "0x02, 0x00, 0x40, 0x3E, 0x03, 0x00, 0x40, 0x08, 0x01, 0x00, 0x60, 0x0C, 0x01,", "12, 13, 0, -11 ], # 0x48 'H' [ 489, 7, 12, 6,", "[ 943, 9, 12, 9, 0, -11 ], # 0x68 'h' [ 957,", "0xE7, 0xC0, 0x3E, 0x01, 0x80, 0x20, 0x0C, 0x01, 0x80, 0x30, 0x04, 0x01, 0x80,", "'X' [ 775, 10, 12, 11, 2, -11 ], # 0x59 'Y' [", "6, 1, -11 ], # 0x21 '!' [ 6, 5, 4, 6, 3,", "0x04, 0x02, 0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10, 0x08, 0x00, 0x56, 0xF0, 0xF0,", "0x43, 0x10, 0xC6, 0x62, 0x70, 0x7F, 0xE9, 0x8E, 0x31, 0x04, 0x01, 0x80, 0x30,", "16, 7, 0, -12 ], # 0x7D ']' [ 1160, 8, 3, 10,", "[ 312, 7, 12, 8, 2, -11 ], # 0x3F '?' 
[ 323,", "0x08, 0x61, 0x8C, 0x30, 0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00, 0x10, 0x40, 0x82,", "0xC0, 0xD8, 0x31, 0x8C, 0x1E, 0x00, 0x3F, 0xC1, 0x9C, 0x21, 0x8C, 0x31, 0x86,", "0x00, 0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82, 0x04, 0x1C, 0x30, 0x40,", "0x00, 0x01, 0x83, 0x87, 0x07, 0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61, 0xB1,", "0x40, 0x40, 0x60, 0x20, 0x20, 0x10, 0x10, 0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC,", "0x98, 0x23, 0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F, 0x80, 0x3F, 0xC1, 0x8C,", "9, 12, 8, 0, -11 ], # 0x4A 'J' [ 514, 13, 12,", "# 0x55 'U' [ 717, 11, 12, 12, 2, -11 ], # 0x56", "# 0x7D ']' [ 1160, 8, 3, 10, 1, -5 ] ] #", "0x38, 0x86, 0xB2, 0x0D, 0x24, 0x1C, 0x50, 0x38, 0xA0, 0x21, 0x80, 0x42, 0x01,", "], # 0x77 'w' [ 1100, 9, 8, 8, -1, -7 ], #", "0x24, 0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73, 0x32, 0x26, 0x64, 0x4C, 0xDE, 0x77,", "[ 1146, 7, 16, 7, 0, -12 ], # 0x7D ']' [ 1160,", "0x01, 0x00, 0x20, 0x04, 0x01, 0x00, 0xC0, 0x00, 0x1E, 0x19, 0xD8, 0xCC, 0xE1,", "# 0x51 'Q' [ 650, 11, 12, 11, 0, -11 ], # 0x52", "0x33 '3' [ 188, 9, 12, 9, 0, -11 ], # 0x34 '4'", "0, -7 ], # 0x6D 'm' [ 1008, 8, 8, 9, 0, -7", "0x00, 0x60, 0x0F, 0x80, 0x07, 0x91, 0x87, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00,", "0, -7 ], # 0x65 'e' [ 905, 11, 17, 8, -1, -12", "0x49 'I' [ 500, 9, 12, 8, 0, -11 ], # 0x4A 'J'", "0x80, 0x42, 0x01, 0x04, 0x00, 0x3E, 0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00,", "], # 0x2F '/' [ 137, 9, 13, 9, 1, -12 ], #", "1, -11 ], # 0x26 '&' [ 80, 2, 4, 4, 3, -11", "8, 12, 8, 0, -11 ], # 0x6B 'k' [ 989, 4, 12,", "0x0F, 0x07, 0x87, 0xC3, 0x61, 0xB1, 0x88, 0x83, 0x80, 0x04, 0x70, 0xC3, 0x08,", "8, 13, 0, -7 ], # 0x6D 'm' [ 1008, 8, 8, 9,", "0x1C, 0x07, 0x0C, 0x0E, 0x0C, 0x14, 0x14, 0x1C, 0x14, 0x2C, 0x16, 0x4C, 0x26,", "0x38, 0x08, 0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F,", "0x08, 0x18, 0x00, 0x3F, 0x42, 0x04, 0x08, 0x10, 0x20, 0x40, 0x72, 0x0E, 0x08,", "# 0x42 'B' [ 376, 12, 12, 11, 1, -11 ], # 0x43", "0x6A, 0x93, 0x08, 0x08, 0x04, 0x02, 0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10, 0x08,", "[ 682, 11, 12, 11, 2, -11 ], # 0x54 'T' [ 699,", "0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03,", "0x95, 0x43, 0x01, 0x80, 0xC0, 0xA0, 0x91, 0x8E, 0x70, 0x88, 0x46, 0x23, 0x20,", "# 0x3B '' [ 284, 9, 9, 10, 1, -8 ], # 0x3C", "[ 575, 13, 12, 12, 0, -11 ], # 0x4E 'N' [ 595,", "0x60, 0x10, 0x04, 0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C, 0x2C, 0x26, 0x33, 0x38,", "0x60, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F,", "0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0, 0x3E, 0x3C, 0x30,", "-11 ], # 0x46 'F' [ 450, 12, 12, 12, 1, -11 ],", "0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C, 0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04, 0x02,", "9, 1, 9, 0, 2 ], # 0x5F '_' [ 851, 3, 3,", "0x88, 0x1B, 0x8F, 0x80, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04,", "0x80, 0x07, 0x91, 0x86, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC0, 0x0C, 0x00,", "0xCD, 0x86, 0xC3, 0x61, 0xB1, 0x88, 0xC3, 0xC0, 0x7F, 0x40, 0x80, 0x80, 0x40,", "0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0, 0x63, 0x82,", "0x83, 0x7F, 0xE0, 0x1C, 0x07, 0x0C, 0x0E, 0x0C, 0x14, 0x14, 0x1C, 0x14, 0x2C,", "0x7C, 0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31, 0x8C, 0x43, 0x31, 0x88,", "# 0x38 '8' [ 260, 9, 13, 9, 0, -12 ], # 0x39", "], # 0x5C '\\' [ 830, 6, 15, 7, 1, -11 ], #", "0x0C, 0x61, 0x84, 0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E, 0x6A, 0x93, 0x08, 0x08,", "0x87, 0x31, 0x8C, 0x43, 0x31, 0x88, 0x62, 0x30, 0xF0, 0x60, 0x10, 0x04, 0x03,", "0xA3, 0x47, 0x0C, 0x10, 0xE2, 0x2C, 0x44, 0xD8, 0x9D, 
0x23, 0xA4, 0x65, 0x0C,", "0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C, 0x9D, 0xEE, 0x62, 0xC4,", "'M' [ 575, 13, 12, 12, 0, -11 ], # 0x4E 'N' [", "0x82, 0x04, 0x1C, 0x30, 0x40, 0x83, 0x04, 0x08, 0x20, 0x60, 0x99, 0x8E ]", "842, 8, 7, 8, 0, -11 ], # 0x5E '^' [ 849, 9,", "0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C, 0x9D, 0xEE, 0x62, 0xC4, 0x89, 0xA3,", "0x08, 0x21, 0x08, 0x61, 0x8C, 0x30, 0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00, 0x10,", "0x04, 0x70, 0x38, 0x10, 0x10, 0x10, 0x37, 0x22, 0x24, 0x38, 0x78, 0x48, 0x4D,", "], # 0x52 'R' [ 667, 10, 12, 8, 0, -11 ], #", "0x26, 0x02, 0x60, 0x0C, 0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83,", "0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4, 0x19, 0x86, 0x7F, 0x80,", "0x80, 0x80, 0x40, 0x40, 0x60, 0x20, 0x20, 0x10, 0x10, 0x18, 0x08, 0x00, 0x1E,", "9, 12, 9, 0, -11 ], # 0x68 'h' [ 957, 4, 12,", "0x78, 0x01, 0x80, 0x40, 0x60, 0x20, 0xF1, 0x89, 0x8C, 0xC4, 0xC2, 0x63, 0x33,", "0x21, 0x41, 0x06, 0x10, 0x21, 0xE1, 0x00, 0x07, 0x83, 0x18, 0xC1, 0xB0, 0x36,", "FreeSerifItalic9pt7b = [ FreeSerifItalic9pt7bBitmaps, FreeSerifItalic9pt7bGlyphs, 0x20, 0x7E, 22 ] # Approx. 1835 bytes", "0x61, 0x86, 0x30, 0xC4, 0x19, 0x86, 0x7F, 0x80, 0x07, 0x91, 0x86, 0x30, 0x26,", "0xB1, 0x05, 0x84, 0x26, 0x20, 0x99, 0x84, 0x3C, 0x03, 0x80, 0x6C, 0x06, 0xC0,", "9, 0, -7 ], # 0x6F 'o' [ 1025, 10, 12, 8, -1,", "-12 ], # 0x38 '8' [ 260, 9, 13, 9, 0, -12 ],", "0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0, 0x0E, 0x19, 0x8C, 0x6C, 0x36, 0x1B,", "-7 ], # 0x71 'q' [ 1054, 7, 8, 7, 0, -7 ],", "12, 9, 1, -11 ], # 0x37 '7' [ 245, 9, 13, 9,", "7, 8, 6, 0, -7 ], # 0x73 's' [ 1068, 5, 9,", "# 0x46 'F' [ 450, 12, 12, 12, 1, -11 ], # 0x47", "1061, 7, 8, 6, 0, -7 ], # 0x73 's' [ 1068, 5,", "0x9C, 0x21, 0x8C, 0x31, 0x86, 0x31, 0x87, 0xE1, 0x80, 0x30, 0x04, 0x01, 0x80,", "'|' [ 1146, 7, 16, 7, 0, -12 ], # 0x7D ']' [", "7, 1, -11 ], # 0x7B '[' [ 1144, 1, 12, 5, 2,", "0x80, 0x70, 0x18, 0x38, 0x70, 0xC0, 0x80, 0x00, 0x3C, 0x8C, 0x18, 0x30, 0xC3,", "0x31 '1' [ 162, 8, 12, 9, 1, -11 ], # 0x32 '2'", "0x18, 0x18, 0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8, 0x80, 0x1F, 0x98,", "0x10, 0x19, 0x95, 0x43, 0x01, 0x80, 0xC0, 0xA0, 0x91, 0x8E, 0x70, 0x88, 0x46,", "0x3C, 0x78, 0x1E, 0x18, 0x20, 0xC1, 0x83, 0x04, 0x18, 0x30, 0x41, 0x87, 0x80,", "0x1E, 0x00, 0x3E, 0x78, 0x61, 0x82, 0x10, 0x31, 0x01, 0xB0, 0x0E, 0x00, 0x58,", "# 0x47 'G' [ 468, 14, 12, 13, 0, -11 ], # 0x48", "0x12, 0x22, 0x24, 0x40, 0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82, 0x20, 0x98, 0x24,", "], # 0x44 'D' [ 414, 12, 12, 10, 0, -11 ], #", "0x08, 0x04, 0x02, 0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10, 0x08, 0x00, 0x56, 0xF0,", "'V' [ 734, 15, 12, 16, 2, -11 ], # 0x57 'W' [", "], # 0x38 '8' [ 260, 9, 13, 9, 0, -12 ], #", "12, 12, 1, -11 ], # 0x47 'G' [ 468, 14, 12, 13,", "6, 8, 9, 3, -11 ], # 0x2A '#' [ 111, 9, 9,", "0x80, 0x20, 0x0C, 0x01, 0x80, 0x30, 0x04, 0x01, 0x80, 0x30, 0x04, 0x0D, 0x83,", "0x60, 0x0C, 0x01, 0x00, 0x20, 0x04, 0x01, 0x00, 0xC0, 0x00, 0x1E, 0x19, 0xD8,", "9, 12, 8, 0, -7 ], # 0x67 'g' [ 943, 9, 12,", "0x06, 0x0C, 0x00, 0x33, 0x00, 0x00, 0xCC, 0x33, 0x00, 0x00, 0x44, 0x48, 0x01,", "0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0, 0x63, 0x82, 0x0C, 0x30, 0x31,", "9, 10, 12, 9, 0, -11 ], # 0x23 '#' [ 24, 9,", "12, 1, -8 ], # 0x2B '+' [ 122, 2, 4, 5, 0,", "0x3E, 0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60, 0x06, 0x00, 0xB0, 0x13,", "0x84, 0x28, 0x21, 0x41, 0x06, 0x10, 0x21, 0xE1, 0x00, 0x07, 0x83, 0x18, 0xC1,", "0x30, 0x40, 0x83, 0x04, 0x08, 0x20, 0x60, 0x99, 0x8E ] FreeSerifItalic9pt7bGlyphs = [", "0x18, 0x0C, 0x04, 0x36, 0x1E, 
0x00, 0x3E, 0x78, 0x61, 0x82, 0x10, 0x31, 0x01,", "0xCC, 0xC3, 0xC0, 0x0E, 0x19, 0x8C, 0x6C, 0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F,", "], # 0x29 ')' [ 105, 6, 8, 9, 3, -11 ], #", "0x59 'Y' [ 790, 11, 12, 10, 0, -11 ], # 0x5A 'Z'", "0x61, 0xE7, 0xC0, 0x3E, 0x01, 0x80, 0x20, 0x0C, 0x01, 0x80, 0x30, 0x04, 0x01,", "0x8C, 0xC4, 0xC2, 0x63, 0x33, 0xAE, 0xE0, 0x0E, 0x65, 0x8B, 0x2F, 0x98, 0x31,", "0x08, 0x00, 0x30, 0xCA, 0x5E, 0x6A, 0x93, 0x08, 0x08, 0x04, 0x02, 0x01, 0x0F,", "0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02, 0x06, 0x0C, 0x08, 0x10, 0x20, 0x42,", "0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C, 0x1E, 0x00, 0x3F, 0xC1, 0x9C, 0x21,", "[ 957, 4, 12, 4, 1, -11 ], # 0x69 'i' [ 963,", "12, 0, -11 ], # 0x4E 'N' [ 595, 11, 12, 12, 1,", "0, -7 ], # 0x7A 'z' [ 1132, 6, 15, 7, 1, -11", "2, -11 ], # 0x56 'V' [ 734, 15, 12, 16, 2, -11", "0x00, 0x00, 0x00, 0xC0, 0x83, 0x04, 0x08, 0x10, 0x60, 0x81, 0x02, 0x04, 0x70,", "0x60, 0xC0, 0xC0, 0xC0, 0xC4, 0x78, 0x01, 0x80, 0x40, 0x60, 0x20, 0xF1, 0x89,", "0x65 'e' [ 905, 11, 17, 8, -1, -12 ], # 0x66 'f'", "0x38, 0x08, 0x04, 0x02, 0x03, 0x39, 0x6C, 0xC6, 0x46, 0x63, 0x21, 0x11, 0xB8,", "], # 0x6C 'l' [ 995, 13, 8, 13, 0, -7 ], #", "# 0x4E 'N' [ 595, 11, 12, 12, 1, -11 ], # 0x4F", "0xC2, 0x08, 0x20, 0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF,", "0x28, 0x18, 0x08, 0x08, 0x08, 0x18, 0x00, 0x3F, 0x42, 0x04, 0x08, 0x10, 0x20,", "0xD8, 0x7C, 0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31, 0x8C, 0x43, 0x31,", "0xCC, 0x0C, 0x60, 0x83, 0xF0, 0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x18,", "], # 0x61 'a' [ 862, 9, 12, 9, 0, -11 ], #", "0x8F, 0xF8, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03,", "0x03, 0x00, 0x60, 0x08, 0x00, 0xFB, 0xCE, 0x43, 0x0C, 0x86, 0x11, 0x8C, 0x43,", "0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11,", "0x00, 0xFB, 0xCE, 0x43, 0x0C, 0x86, 0x11, 0x8C, 0x43, 0x38, 0x86, 0xB2, 0x0D,", "-11 ], # 0x32 '2' [ 174, 9, 12, 9, 0, -11 ],", "-11 ], # 0x4E 'N' [ 595, 11, 12, 12, 1, -11 ],", "0xA0, 0x91, 0x8E, 0x70, 0x88, 0x46, 0x23, 0x20, 0x90, 0x50, 0x28, 0x18, 0x08,", "0x31, 0x86, 0x31, 0x87, 0xE1, 0x80, 0x30, 0x04, 0x01, 0x80, 0x78, 0x00, 0x07,", "0xC0, 0xD8, 0x31, 0x04, 0x13, 0x01, 0x80, 0x70, 0xB7, 0xE0, 0x3F, 0xC1, 0x8C,", "FreeSerifItalic9pt7bGlyphs = [ [ 0, 0, 0, 5, 0, 1 ], # 0x20", "9, 0, -11 ], # 0x64 'd' [ 898, 7, 8, 7, 0,", "')' [ 105, 6, 8, 9, 3, -11 ], # 0x2A '#' [", "'k' [ 989, 4, 12, 5, 1, -11 ], # 0x6C 'l' [", "0xEE, 0x38, 0x08, 0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93,", "0x80, 0x6C, 0x06, 0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66, 0x24, 0x24, 0xC3, 0x8C,", "0x38, 0xA0, 0x21, 0x80, 0x42, 0x01, 0x04, 0x00, 0x3E, 0x71, 0x82, 0x0C, 0x40,", "0x01, 0x81, 0x41, 0x61, 0x21, 0x11, 0x18, 0x88, 0xFF, 0x02, 0x03, 0x01, 0x00,", "0x83, 0x04, 0x18, 0x30, 0x41, 0x87, 0x80, 0x0F, 0x81, 0x80, 0x80, 0xC0, 0x60,", "# 0x37 '7' [ 245, 9, 13, 9, 1, -12 ], # 0x38", "0x9D, 0xEE, 0x62, 0xC4, 0x89, 0xA3, 0x47, 0x0C, 0x10, 0xE2, 0x2C, 0x44, 0xD8,", "9, 12, 9, 0, -11 ], # 0x33 '3' [ 188, 9, 12,", "0x20, 0x40, 0x82, 0x04, 0x08, 0x1C, 0x00, 0x81, 0x04, 0x18, 0x20, 0xC1, 0x04,", "0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00, 0x1F, 0x33, 0x60, 0xC0,", "[ 963, 7, 16, 5, -1, -11 ], # 0x6A 'j' [ 977,", "0x8C, 0x31, 0x8C, 0x3F, 0x04, 0xC1, 0x98, 0x31, 0x84, 0x31, 0x86, 0x78, 0x70,", "[ 1016, 9, 8, 9, 0, -7 ], # 0x6F 'o' [ 1025,", "[ 111, 9, 9, 12, 1, -8 ], # 0x2B '+' [ 122,", "0x0C, 0x18, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C, 0x78, 0x1E, 0x18, 0x20, 0xC1,", "-11 ], # 0x50 'P' [ 629, 11, 15, 12, 1, -11 
],", "0x38, 0x70, 0xC0, 0x80, 0x00, 0x3C, 0x8C, 0x18, 0x30, 0xC3, 0x0C, 0x20, 0x40,", "10, 12, 11, 2, -11 ], # 0x59 'Y' [ 790, 11, 12,", "0x05, 0x01, 0x80, 0x40, 0x30, 0x0C, 0x03, 0x03, 0xE0, 0x3F, 0xE4, 0x19, 0x03,", "15, 9, 1, -12 ], # 0x24 '$' [ 41, 14, 12, 15,", "0x0E, 0x00, 0x1F, 0x80, 0x01, 0x00, 0x60, 0x14, 0x04, 0xC0, 0x98, 0x23, 0x07,", "0xE0, 0xBC, 0x82, 0x41, 0x31, 0x0F, 0x00, 0x38, 0x08, 0x04, 0x02, 0x03, 0x39,", "13, 9, 1, -12 ], # 0x38 '8' [ 260, 9, 13, 9,", "0x88, 0x46, 0x23, 0x20, 0x90, 0x50, 0x28, 0x18, 0x08, 0x08, 0x08, 0x18, 0x00,", "0x30, 0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00, 0x10, 0x40, 0x82, 0x0C, 0x30, 0xC3,", "6, 15, 6, 1, -11 ], # 0x28 '(' [ 93, 6, 15,", "2, -11 ], # 0x55 'U' [ 717, 11, 12, 12, 2, -11", "0x0C, 0x01, 0x00, 0x20, 0x04, 0x01, 0x00, 0xC0, 0x00, 0x1E, 0x19, 0xD8, 0xCC,", "0x61, 0x83, 0xE0, 0x3F, 0xC0, 0x63, 0x82, 0x0C, 0x30, 0x31, 0x81, 0x8C, 0x0C,", "0x36, 0x1F, 0x0F, 0x07, 0x87, 0xC3, 0x61, 0xB1, 0x88, 0x83, 0x80, 0x04, 0x70,", "1, -11 ], # 0x51 'Q' [ 650, 11, 12, 11, 0, -11", "0, -7 ], # 0x63 'c' [ 884, 9, 12, 9, 0, -11", "-11 ], # 0x5E '^' [ 849, 9, 1, 9, 0, 2 ],", "0x06, 0x0C, 0x08, 0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0,", "0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x04,", "312, 7, 12, 8, 2, -11 ], # 0x3F '?' [ 323, 13,", "0x42, 0x83, 0xFF, 0x80, 0xD8, 0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33,", "15, 7, 1, -11 ], # 0x5D ']' [ 842, 8, 7, 8,", "4, 1, -11 ], # 0x69 'i' [ 963, 7, 16, 5, -1,", "0x50, 0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C,", "0, -11 ], # 0x53 'S' [ 682, 11, 12, 11, 2, -11", "0, -11 ], # 0x64 'd' [ 898, 7, 8, 7, 0, -7", "9, 9, 10, 1, -8 ], # 0x3C '<' [ 295, 9, 5,", "0x07, 0x30, 0x31, 0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21,", "0x18, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C, 0x78, 0x1E, 0x18, 0x20, 0xC1, 0x83,", "0x43 'C' [ 394, 13, 12, 13, 0, -11 ], # 0x44 'D'", "12, 15, 1, -11 ], # 0x25 '%' [ 62, 12, 12, 14,", "0x13, 0x02, 0x18, 0x61, 0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8, 0x34, 0x05,", "13, 0, -11 ], # 0x44 'D' [ 414, 12, 12, 10, 0,", "0x7E, 0x00, 0x7C, 0xF3, 0x02, 0x30, 0x46, 0x04, 0x60, 0x46, 0x04, 0x40, 0x8C,", "0x30, 0x04, 0x0D, 0x83, 0x7F, 0xE0, 0x1C, 0x07, 0x0C, 0x0E, 0x0C, 0x14, 0x14,", "0xE4, 0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40, 0x18, 0x06, 0x05,", "0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0, 0x76, 0x50, 0xC1,", "0x80, 0xC0, 0xA0, 0x91, 0x8E, 0x70, 0x88, 0x46, 0x23, 0x20, 0x90, 0x50, 0x28,", "0x83, 0x18, 0xC1, 0x98, 0x36, 0x07, 0xC0, 0xF0, 0x1E, 0x06, 0xC0, 0xD8, 0x31,", "8, -1, -7 ], # 0x70 'p' [ 1040, 9, 12, 9, 0,", "':' [ 279, 4, 10, 4, 1, -7 ], # 0x3B '' [", "0x61, 0x82, 0x10, 0x31, 0x01, 0xB0, 0x0E, 0x00, 0x58, 0x06, 0x60, 0x33, 0x01,", "' ' [ 0, 4, 12, 6, 1, -11 ], # 0x21 '!'", "0x01, 0x83, 0x86, 0x1C, 0x0C, 0x03, 0x80, 0x30, 0x07, 0x00, 0x80, 0xFF, 0x80,", "12, 10, 0, -11 ], # 0x46 'F' [ 450, 12, 12, 12,", "0x30, 0x04, 0x01, 0x80, 0x30, 0x04, 0x0D, 0x83, 0x7F, 0xE0, 0x1C, 0x07, 0x0C,", "0x56, 0xF0, 0xF0, 0x03, 0x02, 0x06, 0x04, 0x08, 0x08, 0x10, 0x30, 0x20, 0x60,", "0x00, 0x0F, 0x84, 0x04, 0x03, 0x80, 0x60, 0x18, 0x0C, 0x06, 0x03, 0x03, 0x03,", "-10 ], # 0x41 'A' [ 359, 11, 12, 11, 0, -11 ],", "'C' [ 394, 13, 12, 13, 0, -11 ], # 0x44 'D' [", "-7 ], # 0x7A 'z' [ 1132, 6, 15, 7, 1, -11 ],", "[ [ 0, 0, 0, 5, 0, 1 ], # 0x20 ' '", "0x40, 0x83, 0x04, 0x08, 0x20, 0x60, 0x99, 0x8E ] FreeSerifItalic9pt7bGlyphs = [ [", "[ 717, 11, 12, 12, 2, -11 ], # 0x56 'V' [ 734,", "0x3B, 0x0E, 0x00, 0x1F, 
0x80, 0x01, 0x00, 0x60, 0x14, 0x04, 0xC0, 0x98, 0x23,", "124, 2, 2, 5, 0, -1 ], # 0x2E '.' [ 125, 8,", "0x83, 0x80, 0x04, 0x70, 0xC3, 0x08, 0x21, 0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C,", "0x10, 0xC2, 0x08, 0x20, 0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24, 0x46, 0x42, 0x83,", "0x76 'v' [ 1089, 11, 8, 12, 1, -7 ], # 0x77 'w'", "0xC6, 0x73, 0x32, 0x26, 0x64, 0x4C, 0xDE, 0x77, 0x39, 0x5E, 0xCC, 0xCC, 0xCE,", "0x62, 0x0C, 0x81, 0x90, 0x14, 0x03, 0x00, 0x60, 0x08, 0x00, 0xFB, 0xCE, 0x43,", "0x41, 0x02, 0x00, 0x10, 0x40, 0x82, 0x0C, 0x30, 0xC3, 0x0C, 0x61, 0x84, 0x21,", "0x40 '@' [ 343, 11, 11, 12, 0, -10 ], # 0x41 'A'", "0x0E, 0x19, 0x8C, 0x6C, 0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03, 0x06,", "[ 775, 10, 12, 11, 2, -11 ], # 0x59 'Y' [ 790,", "0x60, 0x14, 0x04, 0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F,", "0x40, 0x04, 0x82, 0x20, 0x98, 0x24, 0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11,", "0x70, 0xB7, 0xE0, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3F, 0x04, 0xC1,", "0x04, 0x40, 0x8C, 0x08, 0xC0, 0x8C, 0x10, 0xE3, 0x03, 0xC0, 0xF8, 0xEC, 0x0C,", "0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00, 0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0,", "0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4, 0x19, 0x86, 0x7F, 0x80, 0x07,", "0x18, 0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8, 0x80, 0x1F, 0x98, 0x98,", "0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40, 0x18, 0x06, 0x05, 0x81,", "0x06, 0x00, 0x00, 0x00, 0xC0, 0x83, 0x04, 0x08, 0x10, 0x60, 0x81, 0x02, 0x04,", "0x31, 0x04, 0x01, 0x80, 0x30, 0x06, 0x00, 0x80, 0x30, 0x06, 0x00, 0x80, 0x7E,", "0x00, 0x40, 0x08, 0x01, 0x00, 0x60, 0x0C, 0x01, 0x00, 0x20, 0x04, 0x01, 0x00,", "[ 1025, 10, 12, 8, -1, -7 ], # 0x70 'p' [ 1040,", "1132, 6, 15, 7, 1, -11 ], # 0x7B '[' [ 1144, 1,", "0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26, 0x20, 0x99, 0x84, 0x3C, 0x03, 0x80, 0x6C,", "0x61, 0x84, 0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E, 0x6A, 0x93, 0x08, 0x08, 0x04,", "0x01, 0xE0, 0xBC, 0x82, 0x41, 0x31, 0x0F, 0x00, 0x38, 0x08, 0x04, 0x02, 0x03,", "8, 0, -11 ], # 0x5E '^' [ 849, 9, 1, 9, 0,", "-11 ], # 0x45 'E' [ 432, 12, 12, 10, 0, -11 ],", "# 0x65 'e' [ 905, 11, 17, 8, -1, -12 ], # 0x66", "0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8, 0x3F, 0xF0,", "0xC0, 0x70, 0x1C, 0x13, 0x49, 0xA4, 0xDA, 0xC7, 0xC1, 0x00, 0x80, 0x1C, 0x61,", "# 0x23 '#' [ 24, 9, 15, 9, 1, -12 ], # 0x24", "0x04, 0x83, 0x20, 0x1C, 0x1B, 0x99, 0x4D, 0x26, 0x81, 0xC0, 0x70, 0x1C, 0x13,", "0x70, 0x18, 0x38, 0x70, 0xC0, 0x80, 0x00, 0x3C, 0x8C, 0x18, 0x30, 0xC3, 0x0C,", "0x82, 0x04, 0x08, 0x1C, 0x00, 0x81, 0x04, 0x18, 0x20, 0xC1, 0x04, 0x08, 0x20,", "0x19, 0x95, 0x43, 0x01, 0x80, 0xC0, 0xA0, 0x91, 0x8E, 0x70, 0x88, 0x46, 0x23,", "0, -11 ], # 0x42 'B' [ 376, 12, 12, 11, 1, -11", "0x08, 0x01, 0x00, 0x60, 0x0C, 0x01, 0x00, 0x20, 0x04, 0x01, 0x00, 0xC0, 0x00,", "0x00, 0x58, 0x06, 0x60, 0x33, 0x01, 0x0C, 0x18, 0x61, 0xE7, 0xC0, 0x3E, 0x01,", "1, -11 ], # 0x25 '%' [ 62, 12, 12, 14, 1, -11", "8, 7, 8, 0, -11 ], # 0x5E '^' [ 849, 9, 1,", "0x60, 0x18, 0x0C, 0x06, 0x03, 0x03, 0x03, 0x1E, 0x00, 0x01, 0x83, 0x87, 0x07,", "0x86, 0x1C, 0x0C, 0x03, 0x80, 0x30, 0x07, 0x00, 0x80, 0xFF, 0x80, 0x00, 0x00,", "# 0x5E '^' [ 849, 9, 1, 9, 0, 2 ], # 0x5F", "0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02,", "0x04, 0xC1, 0x98, 0x31, 0x84, 0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08,", "9, 13, 9, 1, -12 ], # 0x36 '6' [ 231, 9, 12,", "[ 1089, 11, 8, 12, 1, -7 ], # 0x77 'w' [ 1100,", "-11 ], # 0x3F '?' 
[ 323, 13, 12, 14, 1, -11 ],", "359, 11, 12, 11, 0, -11 ], # 0x42 'B' [ 376, 12,", "11, 0, -11 ], # 0x52 'R' [ 667, 10, 12, 8, 0,", "12, 9, 0, -7 ], # 0x79 'y' [ 1123, 8, 9, 7,", "12, 1, -11 ], # 0x47 'G' [ 468, 14, 12, 13, 0,", "# 0x5D ']' [ 842, 8, 7, 8, 0, -11 ], # 0x5E", "# 0x69 'i' [ 963, 7, 16, 5, -1, -11 ], # 0x6A", "-7 ], # 0x67 'g' [ 943, 9, 12, 9, 0, -11 ],", "0x30, 0x06, 0x00, 0x80, 0x30, 0x06, 0x00, 0x80, 0x7E, 0x00, 0x7C, 0xF3, 0x02,", "0xB1, 0x88, 0x83, 0x80, 0x04, 0x70, 0xC3, 0x08, 0x21, 0x86, 0x10, 0x43, 0x08,", "0xC1, 0x00, 0x80, 0x1C, 0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0, 0xC5, 0x03, 0x34,", "0x6D 'm' [ 1008, 8, 8, 9, 0, -7 ], # 0x6E 'n'", "0xC0, 0xF0, 0x1E, 0x06, 0xC0, 0xD8, 0x31, 0x04, 0x13, 0x01, 0x80, 0x70, 0xB7,", "' [ 0, 4, 12, 6, 1, -11 ], # 0x21 '!' [", "0x89, 0xA3, 0x47, 0x0C, 0x10, 0xE2, 0x2C, 0x44, 0xD8, 0x9D, 0x23, 0xA4, 0x65,", "0x03, 0x01, 0x01, 0x1F, 0x00, 0x01, 0x01, 0x81, 0x41, 0x61, 0x21, 0x11, 0x18,", "0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39, 0x91, 0xCC, 0x93, 0x3B, 0x0E, 0x00,", "0x11, 0x8C, 0x43, 0x38, 0x86, 0xB2, 0x0D, 0x24, 0x1C, 0x50, 0x38, 0xA0, 0x21,", "0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0, 0x38, 0x0C, 0x06, 0x03, 0x01, 0x01,", "0x0C, 0x04, 0x36, 0x1E, 0x00, 0x3E, 0x78, 0x61, 0x82, 0x10, 0x31, 0x01, 0xB0,", "'b' [ 876, 8, 8, 7, 0, -7 ], # 0x63 'c' [", "-7 ], # 0x77 'w' [ 1100, 9, 8, 8, -1, -7 ],", "0xE9, 0x8E, 0x31, 0x04, 0x01, 0x80, 0x30, 0x06, 0x00, 0x80, 0x30, 0x06, 0x00,", "# 0x4A 'J' [ 514, 13, 12, 12, 0, -11 ], # 0x4B", "9, 8, 9, 0, -7 ], # 0x61 'a' [ 862, 9, 12,", "0x8C, 0x10, 0xE3, 0x03, 0xC0, 0xF8, 0xEC, 0x0C, 0x81, 0x18, 0x43, 0x08, 0x62,", "0x93, 0x3B, 0x0E, 0x00, 0x1F, 0x80, 0x01, 0x00, 0x60, 0x14, 0x04, 0xC0, 0x98,", "1146, 7, 16, 7, 0, -12 ], # 0x7D ']' [ 1160, 8,", "0x34 '4' [ 202, 9, 12, 9, 0, -11 ], # 0x35 '5'", "6, 15, 7, 1, -11 ], # 0x7B '[' [ 1144, 1, 12,", "0x40, 0xC8, 0x07, 0x00, 0x60, 0x06, 0x00, 0xB0, 0x13, 0x02, 0x18, 0x61, 0x8F,", "'Q' [ 650, 11, 12, 11, 0, -11 ], # 0x52 'R' [", "0x30, 0x18, 0x0C, 0x04, 0x36, 0x1E, 0x00, 0x3E, 0x78, 0x61, 0x82, 0x10, 0x31,", "8, 12, 9, 1, -11 ], # 0x32 '2' [ 174, 9, 12,", "], # 0x33 '3' [ 188, 9, 12, 9, 0, -11 ], #", "0x62 'b' [ 876, 8, 8, 7, 0, -7 ], # 0x63 'c'", "0x23 '#' [ 24, 9, 15, 9, 1, -12 ], # 0x24 '$'", "0xF8, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10,", "0x0C, 0x40, 0x66, 0x07, 0x30, 0x31, 0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0,", "0xF0, 0x3B, 0x87, 0x31, 0x8C, 0x43, 0x31, 0x88, 0x62, 0x30, 0xF0, 0x60, 0x10,", "], # 0x25 '%' [ 62, 12, 12, 14, 1, -11 ], #", "0x42 'B' [ 376, 12, 12, 11, 1, -11 ], # 0x43 'C'", "0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26, 0x20, 0x99, 0x84,", "0x06, 0x60, 0x33, 0x01, 0x0C, 0x18, 0x61, 0xE7, 0xC0, 0x3E, 0x01, 0x80, 0x20,", "0xF0, 0x60, 0x10, 0x04, 0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C, 0x2C, 0x26, 0x33,", "0x1C, 0x0C, 0x03, 0x80, 0x30, 0x07, 0x00, 0x80, 0xFF, 0x80, 0x00, 0x00, 0x0F,", "-11 ], # 0x5D ']' [ 842, 8, 7, 8, 0, -11 ],", "9, 1, -7 ], # 0x75 'u' [ 1082, 7, 8, 8, 1,", "# 0x29 ')' [ 105, 6, 8, 9, 3, -11 ], # 0x2A", "0x50, 0x38, 0xA0, 0x21, 0x80, 0x42, 0x01, 0x04, 0x00, 0x3E, 0x71, 0x82, 0x0C,", "2, -11 ], # 0x54 'T' [ 699, 12, 12, 13, 2, -11", "0xCC, 0x66, 0x33, 0xB0, 0xE0, 0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0,", "0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4, 0x19, 0x86, 0x7F, 0x80, 0x07, 0x91, 0x86,", "[ 849, 9, 1, 9, 0, 2 ], # 0x5F '_' [ 851,", "0x2B '+' [ 122, 2, 4, 5, 0, -1 ], # 0x2C ','", "0x70, 0xC3, 0x08, 0x21, 0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03,", "# 0x53 'S' [ 682, 11, 
12, 11, 2, -11 ], # 0x54", "0xC1, 0xB0, 0x36, 0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C, 0x1E,", "], # 0x4C 'L' [ 551, 16, 12, 15, 0, -11 ], #", "4, 5, 0, -1 ], # 0x2C ',' [ 123, 4, 1, 6,", "551, 16, 12, 15, 0, -11 ], # 0x4D 'M' [ 575, 13,", "12, 1, -11 ], # 0x4F 'O' [ 612, 11, 12, 10, 0,", "0x00, 0x44, 0x48, 0x01, 0x83, 0x86, 0x1C, 0x0C, 0x03, 0x80, 0x30, 0x07, 0x00,", "12, 14, 1, -11 ], # 0x40 '@' [ 343, 11, 11, 12,", "0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0, 0x38, 0x07, 0x00, 0x60, 0x0C,", "[ 1132, 6, 15, 7, 1, -11 ], # 0x7B '[' [ 1144,", "0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8, 0x34, 0x05, 0x01, 0x80, 0x40, 0x30, 0x0C,", "0x18, 0xD8, 0x7C, 0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31, 0x8C, 0x43,", "0x18, 0x61, 0xE7, 0xC0, 0x3E, 0x01, 0x80, 0x20, 0x0C, 0x01, 0x80, 0x30, 0x04,", "1082, 7, 8, 8, 1, -7 ], # 0x76 'v' [ 1089, 11,", "0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8, 0x80,", "12, 8, -1, -7 ], # 0x70 'p' [ 1040, 9, 12, 9,", "16, 5, -1, -11 ], # 0x6A 'j' [ 977, 8, 12, 8,", "[ 1068, 5, 9, 4, 0, -8 ], # 0x74 't' [ 1074,", "0x7E '~' FreeSerifItalic9pt7b = [ FreeSerifItalic9pt7bBitmaps, FreeSerifItalic9pt7bGlyphs, 0x20, 0x7E, 22 ] # Approx.", "0x02, 0x03, 0x03, 0xC0, 0x76, 0x50, 0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C,", "279, 4, 10, 4, 1, -7 ], # 0x3B '' [ 284, 9,", "0x37, 0x22, 0x24, 0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73, 0x32, 0x26, 0x64, 0x4C,", "'n' [ 1016, 9, 8, 9, 0, -7 ], # 0x6F 'o' [", "12, 9, 0, -11 ], # 0x33 '3' [ 188, 9, 12, 9,", "'S' [ 682, 11, 12, 11, 2, -11 ], # 0x54 'T' [", "0x61, 0x04, 0x30, 0xC3, 0x8F, 0x00, 0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1,", "0x0C, 0x10, 0x20, 0xC1, 0x82, 0x04, 0x1C, 0x30, 0x40, 0x83, 0x04, 0x08, 0x20,", "-11 ], # 0x5C '\\' [ 830, 6, 15, 7, 1, -11 ],", "0x08, 0x61, 0x04, 0x30, 0x86, 0x08, 0x61, 0x04, 0x30, 0xC3, 0x8F, 0x00, 0xFF,", "0x03, 0x10, 0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8, 0x3F, 0xF0, 0xC2, 0x08, 0x21,", "0x06, 0x00, 0x80, 0x30, 0x06, 0x00, 0x80, 0x7E, 0x00, 0x7C, 0xF3, 0x02, 0x30,", "0x7F, 0x80, 0x07, 0x91, 0x86, 0x30, 0x26, 0x02, 0x60, 0x0C, 0x00, 0xC0, 0x0C,", "0x06, 0xC0, 0xD8, 0x31, 0x04, 0x13, 0x01, 0x80, 0x70, 0xB7, 0xE0, 0x3F, 0xC1,", "= [ [ 0, 0, 0, 5, 0, 1 ], # 0x20 '", "-11 ], # 0x47 'G' [ 468, 14, 12, 13, 0, -11 ],", "1008, 8, 8, 9, 0, -7 ], # 0x6E 'n' [ 1016, 9,", "# 0x27 ''' [ 81, 6, 15, 6, 1, -11 ], # 0x28", "0xF9, 0xC8, 0x23, 0x10, 0xC8, 0x34, 0x05, 0x01, 0x80, 0x40, 0x30, 0x0C, 0x03,", "0x99, 0x4D, 0x26, 0x81, 0xC0, 0x70, 0x1C, 0x13, 0x49, 0xA4, 0xDA, 0xC7, 0xC1,", "0x42, 0x01, 0x04, 0x00, 0x3E, 0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60,", "0x65, 0x0C, 0xC1, 0x10, 0x19, 0x95, 0x43, 0x01, 0x80, 0xC0, 0xA0, 0x91, 0x8E,", "0x06, 0x00, 0x0F, 0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31,", "# 0x6C 'l' [ 995, 13, 8, 13, 0, -7 ], # 0x6D", "629, 11, 15, 12, 1, -11 ], # 0x51 'Q' [ 650, 11,", "0x8C, 0x3F, 0x04, 0xC1, 0x98, 0x31, 0x84, 0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C,", "0x98, 0x24, 0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11, 0x04, 0x83, 0x20, 0x1C,", "'q' [ 1054, 7, 8, 7, 0, -7 ], # 0x72 'r' [", "0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C, 0x1E, 0x00, 0x3F, 0xC1, 0x9C,", "0x0F, 0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39, 0x91,", "-11 ], # 0x59 'Y' [ 790, 11, 12, 10, 0, -11 ],", "8, 0, -11 ], # 0x53 'S' [ 682, 11, 12, 11, 2,", "0x8C, 0x08, 0xC0, 0x8C, 0x10, 0xE3, 0x03, 0xC0, 0xF8, 0xEC, 0x0C, 0x81, 0x18,", "0x00, 0x60, 0x0C, 0x01, 0x00, 0x20, 0x04, 0x01, 0x00, 0xC0, 0x00, 0x1E, 0x19,", "0x10, 0xC6, 0x62, 0x70, 0x7F, 0xE9, 0x8E, 0x31, 0x04, 
# FreeSerifItalic9pt7b: Adafruit-GFX-style font data converted to Python
# lists -- a flat bitmap byte array, a per-glyph record table, and a font
# record [bitmaps, glyphs, firstChar, lastChar, yAdvance]. Only the runs of
# bytes and glyph records recoverable here are shown; elided runs are marked.
FreeSerifItalic9pt7bBitmaps = [
    0x11, 0x12, 0x22, 0x24, 0x40, 0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82, 0x20,
    0x98, 0x24, 0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11, 0x04, 0x83, 0x20,
    # ... intermediate bitmap bytes elided ...
    0x04, 0x08, 0x20, 0x60, 0x99, 0x8E,
]

# Glyph records: [bitmapOffset, width, height, xAdvance, xOffset, yOffset]
FreeSerifItalic9pt7bGlyphs = [
    [0, 0, 0, 5, 0, 1],        # 0x20 ' '
    [0, 4, 12, 6, 1, -11],     # 0x21 '!'
    [6, 5, 4, 6, 3, -11],      # 0x22 '"'
    [9, 10, 12, 9, 0, -11],    # 0x23 '#'
    [24, 9, 15, 9, 1, -12],    # 0x24 '$'
    [41, 14, 12, 15, 1, -11],  # 0x25 '%'
    [62, 12, 12, 14, 1, -11],  # 0x26 '&'
    [80, 2, 4, 4, 3, -11],     # 0x27 '''
    # ... intermediate glyph records elided ...
    [1160, 8, 3, 10, 1, -5],   # 0x7E '~'
]

FreeSerifItalic9pt7b = [
    FreeSerifItalic9pt7bBitmaps,
    FreeSerifItalic9pt7bGlyphs,
    0x20,  # first ASCII code covered
    0x7E,  # last ASCII code covered
    22,    # yAdvance (line height)
]
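
# A minimal sketch of how glyph tables in this layout are typically consumed,
# assuming the Adafruit-GFX packing (row-major bits, MSB first, glyphs packed
# back to back in the bitmap array). The helper name render_glyph and the
# ASCII-art output are illustrative assumptions, not part of the font file.
def render_glyph(font, char):
    bitmaps, glyphs, first, last, y_advance = font
    if not (first <= ord(char) <= last):
        raise ValueError(f"{char!r} is not covered by this font")
    offset, width, height, x_advance, x_off, y_off = glyphs[ord(char) - first]
    rows = []
    bit = 0  # running bit index into this glyph's packed bitmap
    for _ in range(height):
        row = ""
        for _ in range(width):
            byte = bitmaps[offset + bit // 8]
            row += "#" if byte & (0x80 >> (bit % 8)) else "."
            bit += 1
        rows.append(row)
    return "\n".join(rows)

# Example (needs the full bitmap array, which is elided above):
# print(render_glyph(FreeSerifItalic9pt7b, "!"))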
[ "'0003_travel_history2'), ] operations = [ migrations.RenameField( model_name='medical_history', old_name='Bronchitis', new_name='bronchitis', ), migrations.RenameField( model_name='medical_history', old_name='COPD',", "model_name='medical_history', old_name='Bronchitis', new_name='bronchitis', ), migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd', ), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus',", "model_name='medical_history', old_name='COPD', new_name='copd', ), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids',", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('travello', '0003_travel_history2'), ] operations", "] operations = [ migrations.RenameField( model_name='medical_history', old_name='Bronchitis', new_name='bronchitis', ), migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd',", "), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history',", "Migration(migrations.Migration): dependencies = [ ('travello', '0003_travel_history2'), ] operations = [ migrations.RenameField( model_name='medical_history', old_name='Bronchitis',", "12:47 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('travello', '0003_travel_history2'), ]", "[ ('travello', '0003_travel_history2'), ] operations = [ migrations.RenameField( model_name='medical_history', old_name='Bronchitis', new_name='bronchitis', ), migrations.RenameField(", "class Migration(migrations.Migration): dependencies = [ ('travello', '0003_travel_history2'), ] operations = [ migrations.RenameField( model_name='medical_history',", "= [ ('travello', '0003_travel_history2'), ] operations = [ migrations.RenameField( model_name='medical_history', old_name='Bronchitis', new_name='bronchitis', ),", "new_name='copd', ), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField(", "migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history', old_name='Kidney_Disease', new_name='kidney_disease', ), migrations.RenameField( model_name='medical_history', old_name='Stroke',", "model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history', old_name='Kidney_Disease', new_name='kidney_disease', ), migrations.RenameField( model_name='medical_history', old_name='Stroke', new_name='stroke',", "old_name='Bronchitis', new_name='bronchitis', ), migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd', ), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ),", "Django 3.0.6 on 2020-06-07 12:47 from django.db import migrations class Migration(migrations.Migration): dependencies =", "dependencies = [ 
('travello', '0003_travel_history2'), ] operations = [ migrations.RenameField( model_name='medical_history', old_name='Bronchitis', new_name='bronchitis',", "), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history', old_name='Kidney_Disease', new_name='kidney_disease', ), migrations.RenameField( model_name='medical_history',", "# Generated by Django 3.0.6 on 2020-06-07 12:47 from django.db import migrations class", "migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history', old_name='Kidney_Disease',", "migrations.RenameField( model_name='medical_history', old_name='Bronchitis', new_name='bronchitis', ), migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd', ), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus',", "model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history', old_name='Kidney_Disease', new_name='kidney_disease',", "2020-06-07 12:47 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('travello', '0003_travel_history2'),", "import migrations class Migration(migrations.Migration): dependencies = [ ('travello', '0003_travel_history2'), ] operations = [", "3.0.6 on 2020-06-07 12:47 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history', old_name='Kidney_Disease', new_name='kidney_disease', ), migrations.RenameField(", "('travello', '0003_travel_history2'), ] operations = [ migrations.RenameField( model_name='medical_history', old_name='Bronchitis', new_name='bronchitis', ), migrations.RenameField( model_name='medical_history',", "migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd', ), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS',", "operations = [ migrations.RenameField( model_name='medical_history', old_name='Bronchitis', new_name='bronchitis', ), migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd', ),", "on 2020-06-07 12:47 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('travello',", "old_name='COPD', new_name='copd', ), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ),", "= [ migrations.RenameField( model_name='medical_history', old_name='Bronchitis', new_name='bronchitis', ), migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd', ), migrations.RenameField(", "[ migrations.RenameField( model_name='medical_history', old_name='Bronchitis', 
new_name='bronchitis', ), migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd', ), migrations.RenameField( model_name='medical_history',", "old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ),", "migrations class Migration(migrations.Migration): dependencies = [ ('travello', '0003_travel_history2'), ] operations = [ migrations.RenameField(", "), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history',", "new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField(", "new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history', old_name='Kidney_Disease', new_name='kidney_disease', ), migrations.RenameField( model_name='medical_history', old_name='Stroke', new_name='stroke', ), ]", "Generated by Django 3.0.6 on 2020-06-07 12:47 from django.db import migrations class Migration(migrations.Migration):", "migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease',", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('travello', '0003_travel_history2'), ] operations =", "), migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd', ), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history',", "old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history', old_name='Kidney_Disease', new_name='kidney_disease', ), migrations.RenameField( model_name='medical_history', old_name='Stroke', new_name='stroke', ),", "old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease', ), migrations.RenameField( model_name='medical_history', old_name='Kidney_Disease', new_name='kidney_disease', ),", "by Django 3.0.6 on 2020-06-07 12:47 from django.db import migrations class Migration(migrations.Migration): dependencies", "model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField( model_name='medical_history', old_name='HIV_AIDS', new_name='hiv_aids', ), migrations.RenameField( model_name='medical_history', old_name='Ischemic_heart_disease', new_name='ischemic_heart_disease',", "new_name='bronchitis', ), migrations.RenameField( model_name='medical_history', old_name='COPD', new_name='copd', ), migrations.RenameField( model_name='medical_history', old_name='Diabetes_mellitus', new_name='diabetes_mellitus', ), migrations.RenameField(" ]
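
# For context, a hedged sketch of what the renamed model could look like in
# travello/models.py after this migration. Neither the model's class name nor
# its field types are visible here, so MedicalHistory and BooleanField are
# purely illustrative assumptions.
from django.db import models

class MedicalHistory(models.Model):
    bronchitis = models.BooleanField(default=False)              # was Bronchitis
    copd = models.BooleanField(default=False)                    # was COPD
    diabetes_mellitus = models.BooleanField(default=False)       # was Diabetes_mellitus
    hiv_aids = models.BooleanField(default=False)                # was HIV_AIDS
    ischemic_heart_disease = models.BooleanField(default=False)  # was Ischemic_heart_disease
    kidney_disease = models.BooleanField(default=False)          # was Kidney_Disease
    stroke = models.BooleanField(default=False)                  # was Stroke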
[ "properties[i] = [attacki, # defensei] represents the properties of the ith character in", "attack and defense. # # Example 3: # # Input: properties = [[1,5],[10,4],[4,3]]", "third character is weak because the second character has a strictly greater attack", "characters, # and each of the characters has two main properties: attack and", "def numberOfWeakCharacters(self, properties: List[List[int]]) -> int: properties.sort(key=lambda x: (-x[0], x[1])) ans = 0", "# https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You are playing a game that contains multiple characters,", "# # Example 1: # # Input: properties = [[5,5],[6,3],[3,6]] # Output: 0", "_, d in properties: if d < curr_max: ans += 1 else: curr_max", "# Input: properties = [[5,5],[6,3],[3,6]] # Output: 0 # Explanation: No character has", "# Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You are playing a game that", "there exists another character j where attackj > attacki and defensej > defensei.", "properties: if d < curr_max: ans += 1 else: curr_max = d return", "Example 2: # # Input: properties = [[2,2],[3,3]] # Output: 1 # Explanation:", "character is weak because the second character has a strictly greater attack and", "a strictly greater attack and defense. # # Example 3: # # Input:", "number of weak characters. # # Example 1: # # Input: properties =", "# # 2 <= properties.length <= 10^5 # properties[i].length == 2 # 1", "# Example 3: # # Input: properties = [[1,5],[10,4],[4,3]] # Output: 1 #", "to be weak if there exists another character j where attackj > attacki", "# Return the number of weak characters. # # Example 1: # #", "# Output: 0 # Explanation: No character has strictly greater attack and defense", "# Input: properties = [[1,5],[10,4],[4,3]] # Output: 1 # Explanation: The third character", "The third character is weak because the second character has a strictly greater", "defensei] represents the properties of the ith character in the game. # A", "where properties[i] = [attacki, # defensei] represents the properties of the ith character", "strictly greater attack and defense than the other. # # Example 2: #", "character in the game. # A character is said to be weak if", "if d < curr_max: ans += 1 else: curr_max = d return ans", "the ith character in the game. # A character is said to be", "the Game # Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You are playing a", "characters. # # Example 1: # # Input: properties = [[5,5],[6,3],[3,6]] # Output:", "main properties: attack and defense. # You are given a 2D integer array", "# Explanation: The first character is weak because the second character has a", "is said to be weak if there exists another character j where attackj", "# and each of the characters has two main properties: attack and defense.", "properties = [[1,5],[10,4],[4,3]] # Output: 1 # Explanation: The third character is weak", "1 else: curr_max = d return ans if __name__ == \"__main__\": import os", "character has a strictly greater attack and defense. # # Example 3: #", "two main properties: attack and defense. # You are given a 2D integer", "other. # # Example 2: # # Input: properties = [[2,2],[3,3]] # Output:", "attack and defense levels. # More formally, # a character i is said", "Solution: def numberOfWeakCharacters(self, properties: List[List[int]]) -> int: properties.sort(key=lambda x: (-x[0], x[1])) ans =", "has a strictly greater attack and defense. 
# # Example 3: # #", "formally, # a character i is said to be weak if there exists", "# # Input: properties = [[5,5],[6,3],[3,6]] # Output: 0 # Explanation: No character", "in properties: if d < curr_max: ans += 1 else: curr_max = d", "properties.sort(key=lambda x: (-x[0], x[1])) ans = 0 curr_max = 0 for _, d", "j where attackj > attacki and defensej > defensei. # Return the number", "# 1 <= attacki, defensei <= 10^5 # # from typing import List", "[[5,5],[6,3],[3,6]] # Output: 0 # Explanation: No character has strictly greater attack and", "# # Example 3: # # Input: properties = [[1,5],[10,4],[4,3]] # Output: 1", "array properties where properties[i] = [attacki, # defensei] represents the properties of the", "and defense levels strictly greater than this character's attack and defense levels. #", "and defense. # # # Constraints: # # 2 <= properties.length <= 10^5", "characters has two main properties: attack and defense. # You are given a", "# a character i is said to be weak if there exists another", "Return the number of weak characters. # # Example 1: # # Input:", "of the characters has two main properties: attack and defense. # You are", "be weak if there exists another character j where attackj > attacki and", "levels. # More formally, # a character i is said to be weak", "character's attack and defense levels. # More formally, # a character i is", "d < curr_max: ans += 1 else: curr_max = d return ans if", "defense levels strictly greater than this character's attack and defense levels. # More", "int: properties.sort(key=lambda x: (-x[0], x[1])) ans = 0 curr_max = 0 for _,", "x[1])) ans = 0 curr_max = 0 for _, d in properties: if", "Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You are playing a game that contains", "= [[1,5],[10,4],[4,3]] # Output: 1 # Explanation: The third character is weak because", "properties.length <= 10^5 # properties[i].length == 2 # 1 <= attacki, defensei <=", "the number of weak characters. # # Example 1: # # Input: properties", "0 # Explanation: No character has strictly greater attack and defense than the", "ans += 1 else: curr_max = d return ans if __name__ == \"__main__\":", "other character has both attack and defense levels strictly greater than this character's", "a character i is said to be weak if there exists another character", "and defensej > defensei. # Return the number of weak characters. # #", "= [attacki, # defensei] represents the properties of the ith character in the", "defensei. # Return the number of weak characters. # # Example 1: #", "Explanation: The first character is weak because the second character has a strictly", "ith character in the game. # A character is said to be weak", "= 0 curr_max = 0 for _, d in properties: if d <", "curr_max = 0 for _, d in properties: if d < curr_max: ans", "You are playing a game that contains multiple characters, # and each of", "# A character is said to be weak if any other character has", "both attack and defense levels strictly greater than this character's attack and defense", "The first character is weak because the second character has a strictly greater", "<= 10^5 # properties[i].length == 2 # 1 <= attacki, defensei <= 10^5", "than this character's attack and defense levels. 
# More formally, # a character", "-> int: properties.sort(key=lambda x: (-x[0], x[1])) ans = 0 curr_max = 0 for", "properties = [[5,5],[6,3],[3,6]] # Output: 0 # Explanation: No character has strictly greater", "are given a 2D integer array properties where properties[i] = [attacki, # defensei]", "attacki, defensei <= 10^5 # # from typing import List class Solution: def", "# You are given a 2D integer array properties where properties[i] = [attacki,", "and defense than the other. # # Example 2: # # Input: properties", "that contains multiple characters, # and each of the characters has two main", "Input: properties = [[5,5],[6,3],[3,6]] # Output: 0 # Explanation: No character has strictly", "a game that contains multiple characters, # and each of the characters has", "curr_max: ans += 1 else: curr_max = d return ans if __name__ ==", "x: (-x[0], x[1])) ans = 0 curr_max = 0 for _, d in", "of the ith character in the game. # A character is said to", "# Input: properties = [[2,2],[3,3]] # Output: 1 # Explanation: The first character", "1: # # Input: properties = [[5,5],[6,3],[3,6]] # Output: 0 # Explanation: No", "# # Example 2: # # Input: properties = [[2,2],[3,3]] # Output: 1", "a 2D integer array properties where properties[i] = [attacki, # defensei] represents the", "properties: List[List[int]]) -> int: properties.sort(key=lambda x: (-x[0], x[1])) ans = 0 curr_max =", "1996 python3 # [1996] The Number of Weak Characters in the Game #", "in the game. # A character is said to be weak if any", "else: curr_max = d return ans if __name__ == \"__main__\": import os import", "the second character has a strictly greater attack and defense. # # Example", "typing import List class Solution: def numberOfWeakCharacters(self, properties: List[List[int]]) -> int: properties.sort(key=lambda x:", "strictly greater than this character's attack and defense levels. # More formally, #", "2: # # Input: properties = [[2,2],[3,3]] # Output: 1 # Explanation: The", "0 curr_max = 0 for _, d in properties: if d < curr_max:", "= 0 for _, d in properties: if d < curr_max: ans +=", "attack and defense than the other. # # Example 2: # # Input:", "of weak characters. # # Example 1: # # Input: properties = [[5,5],[6,3],[3,6]]", "represents the properties of the ith character in the game. # A character", "properties[i].length == 2 # 1 <= attacki, defensei <= 10^5 # # from", "# # You are playing a game that contains multiple characters, # and", "character i is said to be weak if there exists another character j", "No character has strictly greater attack and defense than the other. # #", "# # from typing import List class Solution: def numberOfWeakCharacters(self, properties: List[List[int]]) ->", "defensej > defensei. # Return the number of weak characters. # # Example", "# # # Constraints: # # 2 <= properties.length <= 10^5 # properties[i].length", "another character j where attackj > attacki and defensej > defensei. # Return", "has strictly greater attack and defense than the other. # # Example 2:", "Output: 0 # Explanation: No character has strictly greater attack and defense than", "2 # 1 <= attacki, defensei <= 10^5 # # from typing import", "1 <= attacki, defensei <= 10^5 # # from typing import List class", "Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You are playing a game that contains multiple", "< curr_max: ans += 1 else: curr_max = d return ans if __name__", "properties where properties[i] = [attacki, # defensei] represents the properties of the ith", "and defense levels. 
# More formally, # a character i is said to", "character is said to be weak if any other character has both attack", "character j where attackj > attacki and defensej > defensei. # Return the", "integer array properties where properties[i] = [attacki, # defensei] represents the properties of", "attacki and defensej > defensei. # Return the number of weak characters. #", "has both attack and defense levels strictly greater than this character's attack and", "attack and defense. # You are given a 2D integer array properties where", "any other character has both attack and defense levels strictly greater than this", "Explanation: No character has strictly greater attack and defense than the other. #", "character has strictly greater attack and defense than the other. # # Example", "Input: properties = [[1,5],[10,4],[4,3]] # Output: 1 # Explanation: The third character is", "levels strictly greater than this character's attack and defense levels. # More formally,", "# More formally, # a character i is said to be weak if", "# Output: 1 # Explanation: The first character is weak because the second", "strictly greater attack and defense. # # # Constraints: # # 2 <=", "d return ans if __name__ == \"__main__\": import os import pytest pytest.main([os.path.join(\"tests\", \"test_1996.py\")])", "# Constraints: # # 2 <= properties.length <= 10^5 # properties[i].length == 2", "has a strictly greater attack and defense. # # # Constraints: # #", "given a 2D integer array properties where properties[i] = [attacki, # defensei] represents", "class Solution: def numberOfWeakCharacters(self, properties: List[List[int]]) -> int: properties.sort(key=lambda x: (-x[0], x[1])) ans", "of Weak Characters in the Game # Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # #", "== 2 # 1 <= attacki, defensei <= 10^5 # # from typing", "numberOfWeakCharacters(self, properties: List[List[int]]) -> int: properties.sort(key=lambda x: (-x[0], x[1])) ans = 0 curr_max", "1 # Explanation: The third character is weak because the second character has", "@l2g 1996 python3 # [1996] The Number of Weak Characters in the Game", "# [1996] The Number of Weak Characters in the Game # Difficulty: Medium", "List class Solution: def numberOfWeakCharacters(self, properties: List[List[int]]) -> int: properties.sort(key=lambda x: (-x[0], x[1]))", "properties: attack and defense. # You are given a 2D integer array properties", "character has a strictly greater attack and defense. # # # Constraints: #", "attack and defense levels strictly greater than this character's attack and defense levels.", "weak if there exists another character j where attackj > attacki and defensej", "# defensei] represents the properties of the ith character in the game. #", "= [[2,2],[3,3]] # Output: 1 # Explanation: The first character is weak because", "defense levels. # More formally, # a character i is said to be", "# # Input: properties = [[1,5],[10,4],[4,3]] # Output: 1 # Explanation: The third", "<filename>src/leetcode_1996_the_number_of_weak_characters_in_the_game.py # @l2g 1996 python3 # [1996] The Number of Weak Characters in", "because the second character has a strictly greater attack and defense. # #", "each of the characters has two main properties: attack and defense. # You", "> defensei. # Return the number of weak characters. # # Example 1:", "# # Constraints: # # 2 <= properties.length <= 10^5 # properties[i].length ==", "attackj > attacki and defensej > defensei. # Return the number of weak", "and defense. 
# You are given a 2D integer array properties where properties[i]", "the properties of the ith character in the game. # A character is", "greater than this character's attack and defense levels. # More formally, # a", "ans = 0 curr_max = 0 for _, d in properties: if d", "the characters has two main properties: attack and defense. # You are given", "i is said to be weak if there exists another character j where", "Output: 1 # Explanation: The first character is weak because the second character", "10^5 # properties[i].length == 2 # 1 <= attacki, defensei <= 10^5 #", "More formally, # a character i is said to be weak if there", "said to be weak if there exists another character j where attackj >", "# Explanation: The third character is weak because the second character has a", "playing a game that contains multiple characters, # and each of the characters", "Characters in the Game # Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You are", "game that contains multiple characters, # and each of the characters has two", "2 <= properties.length <= 10^5 # properties[i].length == 2 # 1 <= attacki,", "Example 1: # # Input: properties = [[5,5],[6,3],[3,6]] # Output: 0 # Explanation:", "> attacki and defensej > defensei. # Return the number of weak characters.", "is weak because the second character has a strictly greater attack and defense.", "<= properties.length <= 10^5 # properties[i].length == 2 # 1 <= attacki, defensei", "= d return ans if __name__ == \"__main__\": import os import pytest pytest.main([os.path.join(\"tests\",", "You are given a 2D integer array properties where properties[i] = [attacki, #", "Output: 1 # Explanation: The third character is weak because the second character", "first character is weak because the second character has a strictly greater attack", "if there exists another character j where attackj > attacki and defensej >", "# Explanation: No character has strictly greater attack and defense than the other.", "1 # Explanation: The first character is weak because the second character has", "strictly greater attack and defense. # # Example 3: # # Input: properties", "10^5 # # from typing import List class Solution: def numberOfWeakCharacters(self, properties: List[List[int]])", "are playing a game that contains multiple characters, # and each of the", "from typing import List class Solution: def numberOfWeakCharacters(self, properties: List[List[int]]) -> int: properties.sort(key=lambda", "# You are playing a game that contains multiple characters, # and each", "A character is said to be weak if any other character has both", "the game. # A character is said to be weak if any other", "greater attack and defense. # # Example 3: # # Input: properties =", "List[List[int]]) -> int: properties.sort(key=lambda x: (-x[0], x[1])) ans = 0 curr_max = 0", "character has both attack and defense levels strictly greater than this character's attack", "greater attack and defense than the other. # # Example 2: # #", "[[1,5],[10,4],[4,3]] # Output: 1 # Explanation: The third character is weak because the", "weak because the second character has a strictly greater attack and defense. #", "attack and defense. 
# # # Constraints: # # 2 <= properties.length <=", "Constraints: # # 2 <= properties.length <= 10^5 # properties[i].length == 2 #", "to be weak if any other character has both attack and defense levels", "# # Input: properties = [[2,2],[3,3]] # Output: 1 # Explanation: The first", "d in properties: if d < curr_max: ans += 1 else: curr_max =", "[1996] The Number of Weak Characters in the Game # Difficulty: Medium #", "and each of the characters has two main properties: attack and defense. #", "if any other character has both attack and defense levels strictly greater than", "defensei <= 10^5 # # from typing import List class Solution: def numberOfWeakCharacters(self,", "game. # A character is said to be weak if any other character", "<= 10^5 # # from typing import List class Solution: def numberOfWeakCharacters(self, properties:", "second character has a strictly greater attack and defense. # # # Constraints:", "multiple characters, # and each of the characters has two main properties: attack", "https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You are playing a game that contains multiple characters, #", "is said to be weak if any other character has both attack and", "said to be weak if any other character has both attack and defense", "be weak if any other character has both attack and defense levels strictly", "# Example 1: # # Input: properties = [[5,5],[6,3],[3,6]] # Output: 0 #", "defense than the other. # # Example 2: # # Input: properties =", "The Number of Weak Characters in the Game # Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game", "and defense. # # Example 3: # # Input: properties = [[1,5],[10,4],[4,3]] #", "properties of the ith character in the game. # A character is said", "+= 1 else: curr_max = d return ans if __name__ == \"__main__\": import", "# from typing import List class Solution: def numberOfWeakCharacters(self, properties: List[List[int]]) -> int:", "= [[5,5],[6,3],[3,6]] # Output: 0 # Explanation: No character has strictly greater attack", "than the other. # # Example 2: # # Input: properties = [[2,2],[3,3]]", "greater attack and defense. # # # Constraints: # # 2 <= properties.length", "# properties[i].length == 2 # 1 <= attacki, defensei <= 10^5 # #", "# 2 <= properties.length <= 10^5 # properties[i].length == 2 # 1 <=", "weak characters. # # Example 1: # # Input: properties = [[5,5],[6,3],[3,6]] #", "Number of Weak Characters in the Game # Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game #", "3: # # Input: properties = [[1,5],[10,4],[4,3]] # Output: 1 # Explanation: The", "where attackj > attacki and defensej > defensei. # Return the number of", "defense. # # # Constraints: # # 2 <= properties.length <= 10^5 #", "for _, d in properties: if d < curr_max: ans += 1 else:", "Explanation: The third character is weak because the second character has a strictly", "[[2,2],[3,3]] # Output: 1 # Explanation: The first character is weak because the", "defense. # # Example 3: # # Input: properties = [[1,5],[10,4],[4,3]] # Output:", "Example 3: # # Input: properties = [[1,5],[10,4],[4,3]] # Output: 1 # Explanation:", "the other. 
# # Example 2: # # Input: properties = [[2,2],[3,3]] #", "curr_max = d return ans if __name__ == \"__main__\": import os import pytest", "# @l2g 1996 python3 # [1996] The Number of Weak Characters in the", "(-x[0], x[1])) ans = 0 curr_max = 0 for _, d in properties:", "properties = [[2,2],[3,3]] # Output: 1 # Explanation: The first character is weak", "a strictly greater attack and defense. # # # Constraints: # # 2", "defense. # You are given a 2D integer array properties where properties[i] =", "<= attacki, defensei <= 10^5 # # from typing import List class Solution:", "second character has a strictly greater attack and defense. # # Example 3:", "Game # Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You are playing a game", "weak if any other character has both attack and defense levels strictly greater", "in the Game # Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You are playing", "0 for _, d in properties: if d < curr_max: ans += 1", "[attacki, # defensei] represents the properties of the ith character in the game.", "# Example 2: # # Input: properties = [[2,2],[3,3]] # Output: 1 #", "import List class Solution: def numberOfWeakCharacters(self, properties: List[List[int]]) -> int: properties.sort(key=lambda x: (-x[0],", "2D integer array properties where properties[i] = [attacki, # defensei] represents the properties", "contains multiple characters, # and each of the characters has two main properties:", "Weak Characters in the Game # Difficulty: Medium # https://leetcode.com/problems/the-number-of-weak-characters-in-the-game # # You", "python3 # [1996] The Number of Weak Characters in the Game # Difficulty:", "# Output: 1 # Explanation: The third character is weak because the second", "exists another character j where attackj > attacki and defensej > defensei. #", "the second character has a strictly greater attack and defense. # # #", "Input: properties = [[2,2],[3,3]] # Output: 1 # Explanation: The first character is", "has two main properties: attack and defense. # You are given a 2D", "this character's attack and defense levels. # More formally, # a character i" ]
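
# Quick sanity check against the three examples in the problem statement;
# this tiny harness is a sketch added for illustration, separate from the
# pytest hook above.
_sol = Solution()
assert _sol.numberOfWeakCharacters([[5, 5], [6, 3], [3, 6]]) == 0
assert _sol.numberOfWeakCharacters([[2, 2], [3, 3]]) == 1
assert _sol.numberOfWeakCharacters([[1, 5], [10, 4], [4, 3]]) == 1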
[ "dss_file = \"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file) as fid: ts = fid.read_ts(pathname,regular=False,window_flag=0)", "<reponame>alai-arpas/pydsstools ''' Read irregular time-series data ''' from pydsstools.heclib.dss import HecDss dss_file =", "data ''' from pydsstools.heclib.dss import HecDss dss_file = \"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with", "time-series data ''' from pydsstools.heclib.dss import HecDss dss_file = \"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\"", "from pydsstools.heclib.dss import HecDss dss_file = \"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file) as", "pydsstools.heclib.dss import HecDss dss_file = \"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file) as fid:", "\"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file) as fid: ts = fid.read_ts(pathname,regular=False,window_flag=0) print(ts.pytimes) print(ts.values)", "= \"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file) as fid: ts = fid.read_ts(pathname,regular=False,window_flag=0) print(ts.pytimes)", "= \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file) as fid: ts = fid.read_ts(pathname,regular=False,window_flag=0) print(ts.pytimes) print(ts.values) print(ts.nodata) print(ts.empty)", "Read irregular time-series data ''' from pydsstools.heclib.dss import HecDss dss_file = \"example.dss\" pathname", "HecDss dss_file = \"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file) as fid: ts =", "''' from pydsstools.heclib.dss import HecDss dss_file = \"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file)", "pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file) as fid: ts = fid.read_ts(pathname,regular=False,window_flag=0) print(ts.pytimes) print(ts.values) print(ts.nodata)", "import HecDss dss_file = \"example.dss\" pathname = \"/IRREGULAR/TIMESERIES/FLOW//IR-DECADE/Ex3/\" with HecDss.Open(dss_file) as fid: ts", "irregular time-series data ''' from pydsstools.heclib.dss import HecDss dss_file = \"example.dss\" pathname =", "''' Read irregular time-series data ''' from pydsstools.heclib.dss import HecDss dss_file = \"example.dss\"" ]
[ "blank on plugin {cls.__name__}\" ) if cls.__type__ is not None and cls.__type__ not", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "cls.__author__: raise MetadataException(f\"Author cannot be empty on plugin {cls.__name__}\") if cls.__params__: _check_params_meta(cls) if", "DEALINGS IN THE # SOFTWARE. from ..core.bases.base_plugin import Plugin from ..utils.consts import FuzzType", "the plugin metadata \"\"\" metadata = ['__author__', '__params__', '__desc__', '__type__', '__version__'] class_attr =", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "MetadataException(\"The parameters must be a \" f\"dictionary on plugin {cls.__name__}\") param_dict_keys = cls.__params__.keys()", "the Software without restriction, including without limitation the rights # to use, copy,", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "cls.__type__ is not None and cls.__type__ not in [ value for key, value", "class with the plugin metadata \"\"\" metadata = ['__author__', '__params__', '__desc__', '__type__', '__version__']", "person obtaining a copy # of this software and associated documentation files (the", "key, value in vars(FuzzType).items() if not key.startswith(\"__\") ]: raise MetadataException( f\"Plugin type should", "{cls.__name__}\" ) def _check_params_meta(cls: Plugin) -> None: \"\"\"Checks the parameter metadata into the", ") def _check_params_meta(cls: Plugin) -> None: \"\"\"Checks the parameter metadata into the plugin", "@type cls: Plugin @param cls: The class with the plugin metadata \"\"\" metadata", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "Plugin from ..utils.consts import FuzzType from ..exceptions import MetadataException def plugin_meta(cls: Plugin) ->", "from ..core.bases.base_plugin import Plugin from ..utils.consts import FuzzType from ..exceptions import MetadataException def", "on plugin {cls.__name__}\") if cls.__params__['type'] is list: if 'cli_list_separator' not in param_dict_keys: raise", "MetadataException(f\"Version cannot be blank on plugin {cls.__name__}\") return cls def _check_mandatory_meta(cls: Plugin) ->", "meta in metadata: if meta not in class_attr: raise MetadataException( f\"Metadata {meta} not", "return cls def _check_mandatory_meta(cls: Plugin) -> None: \"\"\"Checks the mandatory metadata into the", "on plugin {cls.__name__}\") return cls def _check_mandatory_meta(cls: Plugin) -> None: \"\"\"Checks the mandatory", "type should be None or a valid FuzzType on plugin {cls.__name__}\" ) if", "included in all # copies or substantial portions of the Software. 
# #", "for key, value in vars(FuzzType).items() if not key.startswith(\"__\") ]: raise MetadataException( f\"Plugin type", "is hereby granted, free of charge, to any person obtaining a copy #", "import Plugin from ..utils.consts import FuzzType from ..exceptions import MetadataException def plugin_meta(cls: Plugin)", "cls.__version__: raise MetadataException(f\"Version cannot be blank on plugin {cls.__name__}\") return cls def _check_mandatory_meta(cls:", "persons to whom the Software is # furnished to do so, subject to", "conditions: # # The above copyright notice and this permission notice shall be", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "a valid FuzzType on plugin {cls.__name__}\" ) if not cls.__version__: raise MetadataException(f\"Version cannot", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "if cls.__params__: _check_params_meta(cls) if not cls.__desc__: raise MetadataException( f\"Description cannot be blank on", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", ") if not cls.__version__: raise MetadataException(f\"Version cannot be blank on plugin {cls.__name__}\") return", "to permit persons to whom the Software is # furnished to do so,", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "cls: The class that call this decorator \"\"\" _check_mandatory_meta(cls) if not cls.__author__: raise", "raise MetadataException(f\"Value of {key} cannot be empty in \" f\"parameters dict on plugin", "SOFTWARE. from ..core.bases.base_plugin import Plugin from ..utils.consts import FuzzType from ..exceptions import MetadataException", "= vars(cls) for meta in metadata: if meta not in class_attr: raise MetadataException(", "of charge, to any person obtaining a copy # of this software and", "cannot be empty on plugin {cls.__name__}\") if cls.__params__: _check_params_meta(cls) if not cls.__desc__: raise", "this decorator \"\"\" _check_mandatory_meta(cls) if not cls.__author__: raise MetadataException(f\"Author cannot be empty on", "MetadataException( f\"Plugin type should be None or a valid FuzzType on plugin {cls.__name__}\"", "list: if 'cli_list_separator' not in param_dict_keys: raise MetadataException(\"The key 'cli_list_separator' must be present", "MetadataException( f\"Metadata {meta} not specified on plugin {cls.__name__}\" ) def _check_params_meta(cls: Plugin) ->", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from ..core.bases.base_plugin import", "so, subject to the following conditions: # # The above copyright notice and", "MetadataException(f\"Author cannot be empty on plugin {cls.__name__}\") if cls.__params__: _check_params_meta(cls) if not cls.__desc__:", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "MetadataException( f\"Description cannot be blank on plugin {cls.__name__}\" ) if cls.__type__ is not", "raise MetadataException(\"The key 'cli_list_separator' must be present \" \"when parameter type is list", "in param_dict_keys: raise MetadataException(f\"Key {key} must be in parameters \" f\"dict on plugin", "copy # of this software and associated documentation files (the \"Software\"), to deal", "to the following conditions: # # The above copyright notice and this permission", "param_dict_keys = cls.__params__.keys() for key in ['metavar', 'type']: if key not in param_dict_keys:", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "parameters must be a \" f\"dictionary on plugin {cls.__name__}\") param_dict_keys = cls.__params__.keys() for", "parameter metadata into the plugin decorator @type cls: Plugin @param cls: The class", "and associated documentation files (the \"Software\"), to deal # in the Software without", "raise MetadataException(f\"Version cannot be blank on plugin {cls.__name__}\") return cls def _check_mandatory_meta(cls: Plugin)", "be present \" \"when parameter type is list \" f\"on plugin {cls.__name__}\") if", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "raise MetadataException(\"The parameters must be a \" f\"dictionary on plugin {cls.__name__}\") param_dict_keys =", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "raise MetadataException( f\"Description cannot be blank on plugin {cls.__name__}\" ) if cls.__type__ is", "sublicense, and/or sell # copies of the Software, and to permit persons to", "Software is # furnished to do so, subject to the following conditions: #", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "Plugin) -> None: \"\"\"Checks the mandatory metadata into the plugin decorator @type cls:", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "all # copies or substantial portions of the Software. # # THE SOFTWARE", "if not key.startswith(\"__\") ]: raise MetadataException( f\"Plugin type should be None or a", "class_attr = vars(cls) for meta in metadata: if meta not in class_attr: raise", "-> None: \"\"\"Checks the parameter metadata into the plugin decorator @type cls: Plugin", "in param_dict_keys: raise MetadataException(\"The key 'cli_list_separator' must be present \" \"when parameter type", "plugin metadata \"\"\" if (type(cls.__params__) is not dict): raise MetadataException(\"The parameters must be", "of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "# copies of the Software, and to permit persons to whom the Software", "# Copyright (c) 2020 - present <NAME> <https://github.com/VitorOriel> # # Permission is hereby", "plugin metadata \"\"\" metadata = ['__author__', '__params__', '__desc__', '__type__', '__version__'] class_attr = vars(cls)", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "on plugin {cls.__name__}\" ) def _check_params_meta(cls: Plugin) -> None: \"\"\"Checks the parameter metadata", "not key.startswith(\"__\") ]: raise MetadataException( f\"Plugin type should be None or a valid", "in [ value for key, value in vars(FuzzType).items() if not key.startswith(\"__\") ]: raise", "permission notice shall be included in all # copies or substantial portions of", "The class with the plugin metadata \"\"\" metadata = ['__author__', '__params__', '__desc__', '__type__',", "plugin {cls.__name__}\" ) if cls.__type__ is not None and cls.__type__ not in [", "metadata \"\"\" metadata = ['__author__', '__params__', '__desc__', '__type__', '__version__'] class_attr = vars(cls) for", "into the plugin decorator @type cls: Plugin @param cls: The class with the", "notice and this permission notice shall be included in all # copies or", "_check_mandatory_meta(cls: Plugin) -> None: \"\"\"Checks the mandatory metadata into the plugin decorator @type", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "OTHER DEALINGS IN THE # SOFTWARE. from ..core.bases.base_plugin import Plugin from ..utils.consts import", "for plugin metadata on a plugin class @type cls: Plugin @param cls: The", "software and associated documentation files (the \"Software\"), to deal # in the Software", "on plugin {cls.__name__}\" ) if not cls.__version__: raise MetadataException(f\"Version cannot be blank on", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "cls: Plugin @param cls: The class with the plugin metadata \"\"\" if (type(cls.__params__)", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #", "f\"dictionary on plugin {cls.__name__}\") param_dict_keys = cls.__params__.keys() for key in ['metavar', 'type']: if", "cls def _check_mandatory_meta(cls: Plugin) -> None: \"\"\"Checks the mandatory metadata into the plugin", "the plugin metadata \"\"\" if (type(cls.__params__) is not dict): raise MetadataException(\"The parameters must", "and to permit persons to whom the Software is # furnished to do", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "the following conditions: # # The above copyright notice and this permission notice", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "_check_params_meta(cls) if not cls.__desc__: raise MetadataException( f\"Description cannot be blank on plugin {cls.__name__}\"", "# furnished to do so, subject to the following conditions: # # The", "the Software, and to permit persons to whom the Software is # furnished", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "Plugin @param cls: The class with the plugin metadata \"\"\" metadata = ['__author__',", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "valid FuzzType on plugin {cls.__name__}\" ) if not cls.__version__: raise MetadataException(f\"Version cannot be", "merge, publish, distribute, sublicense, and/or sell # 
copies of the Software, and to", "value for key, value in vars(FuzzType).items() if not key.startswith(\"__\") ]: raise MetadataException( f\"Plugin", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "not cls.__desc__: raise MetadataException( f\"Description cannot be blank on plugin {cls.__name__}\" ) if", "raise MetadataException(f\"Author cannot be empty on plugin {cls.__name__}\") if cls.__params__: _check_params_meta(cls) if not", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "f\"dict on plugin {cls.__name__}\") if not cls.__params__[key]: raise MetadataException(f\"Value of {key} cannot be", "to do so, subject to the following conditions: # # The above copyright", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from ..core.bases.base_plugin", "f\"Metadata {meta} not specified on plugin {cls.__name__}\" ) def _check_params_meta(cls: Plugin) -> None:", "if not cls.__params__['cli_list_separator']: raise MetadataException(\"Value of 'cli_list_separator' \" f\"cannot be blank on {cls.__name__}\")", "not specified on plugin {cls.__name__}\" ) def _check_params_meta(cls: Plugin) -> None: \"\"\"Checks the", "for meta in metadata: if meta not in class_attr: raise MetadataException( f\"Metadata {meta}", "class_attr: raise MetadataException( f\"Metadata {meta} not specified on plugin {cls.__name__}\" ) def _check_params_meta(cls:", "metadata: if meta not in class_attr: raise MetadataException( f\"Metadata {meta} not specified on", "on a plugin class @type cls: Plugin @param cls: The class that call", "whom the Software is # furnished to do so, subject to the following", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "None or a valid FuzzType on plugin {cls.__name__}\" ) if not cls.__version__: raise", "plugin {cls.__name__}\") if not cls.__params__[key]: raise MetadataException(f\"Value of {key} cannot be empty in", "\"when parameter type is list \" f\"on plugin {cls.__name__}\") if not cls.__params__['cli_list_separator']: raise", "free of charge, to any person obtaining a copy # of this software", "is list \" f\"on plugin {cls.__name__}\") if not cls.__params__['cli_list_separator']: raise MetadataException(\"Value of 'cli_list_separator'", "Plugin) -> Plugin: \"\"\"Decorator to check for plugin metadata on a plugin class", "with the plugin metadata \"\"\" metadata = ['__author__', '__params__', '__desc__', '__type__', '__version__'] class_attr", "class @type cls: Plugin @param cls: The class that call this decorator \"\"\"", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "raise MetadataException( f\"Metadata {meta} not specified on plugin {cls.__name__}\" ) def _check_params_meta(cls: Plugin)", "plugin {cls.__name__}\") if not cls.__params__['cli_list_separator']: raise MetadataException(\"Value of 'cli_list_separator' \" f\"cannot be blank", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "be empty on plugin {cls.__name__}\") if cls.__params__: _check_params_meta(cls) if not cls.__desc__: raise MetadataException(", "raise MetadataException(f\"Key {key} must be in parameters \" f\"dict on plugin {cls.__name__}\") if", "= cls.__params__.keys() for key in ['metavar', 'type']: if key not in param_dict_keys: raise", "\" f\"dictionary on plugin {cls.__name__}\") param_dict_keys = cls.__params__.keys() for key in ['metavar', 'type']:", "from ..exceptions import MetadataException def plugin_meta(cls: Plugin) -> Plugin: \"\"\"Decorator to check for", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "@type cls: Plugin @param cls: The class with the plugin metadata \"\"\" if", "is # furnished to do so, subject to the following conditions: # #", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "MetadataException(f\"Key {key} must be in parameters \" f\"dict on plugin {cls.__name__}\") if not", "cls.__params__[key]: raise MetadataException(f\"Value of {key} cannot be empty in \" f\"parameters dict on", "not cls.__author__: raise MetadataException(f\"Author cannot be empty on plugin {cls.__name__}\") if cls.__params__: _check_params_meta(cls)", "a plugin class @type cls: Plugin @param cls: The class that call this", "THE # SOFTWARE. from ..core.bases.base_plugin import Plugin from ..utils.consts import FuzzType from ..exceptions", "to deal # in the Software without restriction, including without limitation the rights", "in \" f\"parameters dict on plugin {cls.__name__}\") if cls.__params__['type'] is list: if 'cli_list_separator'", "to any person obtaining a copy # of this software and associated documentation", "not None and cls.__type__ not in [ value for key, value in vars(FuzzType).items()", "in all # copies or substantial portions of the Software. # # THE", "empty on plugin {cls.__name__}\") if cls.__params__: _check_params_meta(cls) if not cls.__desc__: raise MetadataException( f\"Description", "{key} cannot be empty in \" f\"parameters dict on plugin {cls.__name__}\") if cls.__params__['type']", "FuzzType from ..exceptions import MetadataException def plugin_meta(cls: Plugin) -> Plugin: \"\"\"Decorator to check", "should be None or a valid FuzzType on plugin {cls.__name__}\" ) if not", "if meta not in class_attr: raise MetadataException( f\"Metadata {meta} not specified on plugin", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "{cls.__name__}\" ) if not cls.__version__: raise MetadataException(f\"Version cannot be blank on plugin {cls.__name__}\")", "cannot be blank on plugin {cls.__name__}\") return cls def _check_mandatory_meta(cls: Plugin) -> None:", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN", "\"\"\"Checks the parameter metadata into the plugin decorator @type cls: Plugin @param cls:", "MetadataException(f\"Value of {key} cannot be empty in \" f\"parameters dict on plugin {cls.__name__}\")", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "<https://github.com/VitorOriel> # # Permission is hereby granted, free of charge, to any person", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "that call this decorator \"\"\" _check_mandatory_meta(cls) if not cls.__author__: raise MetadataException(f\"Author cannot be", "metadata = ['__author__', '__params__', '__desc__', '__type__', '__version__'] class_attr = vars(cls) for meta in", "key.startswith(\"__\") ]: raise MetadataException( f\"Plugin type should be None or a valid FuzzType", "'cli_list_separator' not in param_dict_keys: raise MetadataException(\"The key 'cli_list_separator' must be present \" \"when", "['metavar', 'type']: if key not in param_dict_keys: raise MetadataException(f\"Key {key} must be in", "Software, and to permit persons to whom the Software is # furnished to", "to check for plugin metadata on a plugin class @type cls: Plugin @param", "\"\"\" metadata = ['__author__', '__params__', '__desc__', '__type__', '__version__'] class_attr = vars(cls) for meta", "IN THE # SOFTWARE. from ..core.bases.base_plugin import Plugin from ..utils.consts import FuzzType from", "cannot be blank on plugin {cls.__name__}\" ) if cls.__type__ is not None and", "f\"Description cannot be blank on plugin {cls.__name__}\" ) if cls.__type__ is not None", "this software and associated documentation files (the \"Software\"), to deal # in the", "param_dict_keys: raise MetadataException(\"The key 'cli_list_separator' must be present \" \"when parameter type is", "in metadata: if meta not in class_attr: raise MetadataException( f\"Metadata {meta} not specified", "with the plugin metadata \"\"\" if (type(cls.__params__) is not dict): raise MetadataException(\"The parameters", "import FuzzType from ..exceptions import MetadataException def plugin_meta(cls: Plugin) -> Plugin: \"\"\"Decorator to", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "of {key} cannot be empty in \" f\"parameters dict on plugin {cls.__name__}\") if", "cls: The class with the plugin metadata \"\"\" if (type(cls.__params__) is not dict):", "cls.__params__['type'] is list: if 'cli_list_separator' not in param_dict_keys: raise MetadataException(\"The key 'cli_list_separator' must", "plugin_meta(cls: Plugin) -> Plugin: \"\"\"Decorator to check for plugin metadata on a plugin", "granted, free of charge, to any person obtaining a copy # of this", "{cls.__name__}\" ) if cls.__type__ is not None and cls.__type__ not in [ value", "the parameter metadata into the plugin decorator @type cls: Plugin @param cls: The", "<NAME> <https://github.com/VitorOriel> # # Permission is hereby granted, free of charge, to any", "{meta} not specified on plugin {cls.__name__}\" ) def _check_params_meta(cls: Plugin) -> None: \"\"\"Checks", "vars(FuzzType).items() if not key.startswith(\"__\") ]: raise MetadataException( f\"Plugin type should be None or", "furnished to do so, subject to the following conditions: # # The above", "and this permission notice shall be included in all # copies or substantial", "modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and", "ANY KIND, EXPRESS OR # IMPLIED, 
INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "metadata into the plugin decorator @type cls: Plugin @param cls: The class with", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "def plugin_meta(cls: Plugin) -> Plugin: \"\"\"Decorator to check for plugin metadata on a", "# Permission is hereby granted, free of charge, to any person obtaining a", "key not in param_dict_keys: raise MetadataException(f\"Key {key} must be in parameters \" f\"dict", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "not in param_dict_keys: raise MetadataException(f\"Key {key} must be in parameters \" f\"dict on", "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "must be a \" f\"dictionary on plugin {cls.__name__}\") param_dict_keys = cls.__params__.keys() for key", "be in parameters \" f\"dict on plugin {cls.__name__}\") if not cls.__params__[key]: raise MetadataException(f\"Value", "parameters \" f\"dict on plugin {cls.__name__}\") if not cls.__params__[key]: raise MetadataException(f\"Value of {key}", "cls: The class with the plugin metadata \"\"\" metadata = ['__author__', '__params__', '__desc__',", "on plugin {cls.__name__}\") if not cls.__params__[key]: raise MetadataException(f\"Value of {key} cannot be empty", "Copyright (c) 2020 - present <NAME> <https://github.com/VitorOriel> # # Permission is hereby granted,", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "if cls.__type__ is not None and cls.__type__ not in [ value for key,", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "f\"parameters dict on plugin {cls.__name__}\") if cls.__params__['type'] is list: if 'cli_list_separator' not in", "dict): raise MetadataException(\"The parameters must be a \" f\"dictionary on plugin {cls.__name__}\") param_dict_keys", "on plugin {cls.__name__}\" ) if cls.__type__ is not None and cls.__type__ not in", "in the Software without restriction, including without limitation the rights # to use,", "@param cls: The class that call this decorator \"\"\" _check_mandatory_meta(cls) if not cls.__author__:", "PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "not cls.__version__: raise MetadataException(f\"Version cannot be blank on plugin {cls.__name__}\") return cls def", "'__desc__', '__type__', '__version__'] class_attr = vars(cls) for meta in metadata: if meta not", "copies of the Software, and to permit persons to whom the Software is", "\" f\"on plugin {cls.__name__}\") if not cls.__params__['cli_list_separator']: raise MetadataException(\"Value of 'cli_list_separator' \" f\"cannot", "\" f\"dict on plugin {cls.__name__}\") if not cls.__params__[key]: raise MetadataException(f\"Value of {key} cannot", "not dict): raise MetadataException(\"The parameters must be a \" f\"dictionary on plugin {cls.__name__}\")", "must be in parameters \" f\"dict on plugin {cls.__name__}\") if not cls.__params__[key]: raise", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "import MetadataException def plugin_meta(cls: Plugin) -> Plugin: \"\"\"Decorator to check for plugin metadata", "for key in ['metavar', 'type']: if key not in param_dict_keys: raise MetadataException(f\"Key {key}", "- present <NAME> <https://github.com/VitorOriel> # # Permission is hereby granted, free of charge,", "be empty in \" f\"parameters dict on plugin {cls.__name__}\") if cls.__params__['type'] is list:", "notice shall be included in all # copies or substantial portions of the", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "@param cls: The class with the plugin metadata \"\"\" metadata = ['__author__', '__params__',", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "shall be included in all # copies or substantial portions of the Software.", "call this decorator \"\"\" _check_mandatory_meta(cls) if not cls.__author__: raise MetadataException(f\"Author cannot be empty", "The above copyright notice and this permission notice shall be included in all", "and/or sell # copies of the Software, and to permit persons to whom", "FuzzType on plugin {cls.__name__}\" ) if not cls.__version__: raise MetadataException(f\"Version cannot be blank", "'type']: if key not in param_dict_keys: raise MetadataException(f\"Key {key} must be in parameters", "dict on plugin {cls.__name__}\") if cls.__params__['type'] is list: if 'cli_list_separator' not in param_dict_keys:", "_check_params_meta(cls: Plugin) -> None: \"\"\"Checks the parameter metadata into the plugin decorator @type", "if not cls.__desc__: raise MetadataException( f\"Description cannot be blank on plugin {cls.__name__}\" )", "@param cls: The class with the plugin metadata \"\"\" if (type(cls.__params__) is not", "= ['__author__', '__params__', '__desc__', '__type__', '__version__'] class_attr = vars(cls) for meta in metadata:", "2020 - present <NAME> <https://github.com/VitorOriel> # # Permission is hereby granted, free of", "class that call this decorator \"\"\" _check_mandatory_meta(cls) if not cls.__author__: raise MetadataException(f\"Author cannot", "# in the Software without restriction, including without limitation the rights # to", "must be present \" \"when parameter type is list \" f\"on plugin {cls.__name__}\")", "]: raise MetadataException( f\"Plugin type should be None or a valid FuzzType on", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "MetadataException def plugin_meta(cls: Plugin) -> Plugin: \"\"\"Decorator to check for plugin metadata on", 
"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "def _check_mandatory_meta(cls: Plugin) -> None: \"\"\"Checks the mandatory metadata into the plugin decorator", "..utils.consts import FuzzType from ..exceptions import MetadataException def plugin_meta(cls: Plugin) -> Plugin: \"\"\"Decorator", "cls.__desc__: raise MetadataException( f\"Description cannot be blank on plugin {cls.__name__}\" ) if cls.__type__", "plugin {cls.__name__}\") param_dict_keys = cls.__params__.keys() for key in ['metavar', 'type']: if key not", "any person obtaining a copy # of this software and associated documentation files", "# # The above copyright notice and this permission notice shall be included", "from ..utils.consts import FuzzType from ..exceptions import MetadataException def plugin_meta(cls: Plugin) -> Plugin:", "cannot be empty in \" f\"parameters dict on plugin {cls.__name__}\") if cls.__params__['type'] is", "check for plugin metadata on a plugin class @type cls: Plugin @param cls:", "\"Software\"), to deal # in the Software without restriction, including without limitation the", "(c) 2020 - present <NAME> <https://github.com/VitorOriel> # # Permission is hereby granted, free", "plugin decorator @type cls: Plugin @param cls: The class with the plugin metadata", "specified on plugin {cls.__name__}\" ) def _check_params_meta(cls: Plugin) -> None: \"\"\"Checks the parameter", "{key} must be in parameters \" f\"dict on plugin {cls.__name__}\") if not cls.__params__[key]:", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "a copy # of this software and associated documentation files (the \"Software\"), to", "deal # in the Software without restriction, including without limitation the rights #", "OR OTHER DEALINGS IN THE # SOFTWARE. from ..core.bases.base_plugin import Plugin from ..utils.consts", "cls.__params__.keys() for key in ['metavar', 'type']: if key not in param_dict_keys: raise MetadataException(f\"Key", "present <NAME> <https://github.com/VitorOriel> # # Permission is hereby granted, free of charge, to", "plugin {cls.__name__}\" ) if not cls.__version__: raise MetadataException(f\"Version cannot be blank on plugin", "or a valid FuzzType on plugin {cls.__name__}\" ) if not cls.__version__: raise MetadataException(f\"Version", "\" \"when parameter type is list \" f\"on plugin {cls.__name__}\") if not cls.__params__['cli_list_separator']:", "in parameters \" f\"dict on plugin {cls.__name__}\") if not cls.__params__[key]: raise MetadataException(f\"Value of", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "(the \"Software\"), to deal # in the Software without restriction, including without limitation", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "distribute, sublicense, and/or sell # copies of the Software, and to permit persons", "{cls.__name__}\") if not cls.__params__[key]: raise MetadataException(f\"Value of {key} cannot be empty in \"", "be a \" f\"dictionary on plugin {cls.__name__}\") param_dict_keys = cls.__params__.keys() for key in", "charge, to any person obtaining a copy # of this software and associated", "be blank on plugin {cls.__name__}\" ) if cls.__type__ is not None and cls.__type__", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO", "_check_mandatory_meta(cls) if not cls.__author__: raise MetadataException(f\"Author cannot be empty on plugin {cls.__name__}\") if", "'__version__'] class_attr = vars(cls) for meta in metadata: if meta not in class_attr:", "metadata \"\"\" if (type(cls.__params__) is not dict): raise MetadataException(\"The parameters must be a", "empty in \" f\"parameters dict on plugin {cls.__name__}\") if cls.__params__['type'] is list: if", "['__author__', '__params__', '__desc__', '__type__', '__version__'] class_attr = vars(cls) for meta in metadata: if", "Plugin @param cls: The class with the plugin metadata \"\"\" if (type(cls.__params__) is", "class with the plugin metadata \"\"\" if (type(cls.__params__) is not dict): raise MetadataException(\"The", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "[ value for key, value in vars(FuzzType).items() if not key.startswith(\"__\") ]: raise MetadataException(", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "to whom the Software is # furnished to do so, subject to the", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "not cls.__params__[key]: raise MetadataException(f\"Value of {key} cannot be empty in \" f\"parameters dict", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "be included in all # copies or substantial portions of the Software. #", "The class that call this decorator \"\"\" _check_mandatory_meta(cls) if not cls.__author__: raise MetadataException(f\"Author", "if not cls.__version__: raise MetadataException(f\"Version cannot be blank on plugin {cls.__name__}\") return cls", "'__params__', '__desc__', '__type__', '__version__'] class_attr = vars(cls) for meta in metadata: if meta", "cls: Plugin @param cls: The class that call this decorator \"\"\" _check_mandatory_meta(cls) if", "present \" \"when parameter type is list \" f\"on plugin {cls.__name__}\") if not", "is not dict): raise MetadataException(\"The parameters must be a \" f\"dictionary on plugin", "{cls.__name__}\") if not cls.__params__['cli_list_separator']: raise MetadataException(\"Value of 'cli_list_separator' \" f\"cannot be blank on", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "def _check_params_meta(cls: Plugin) -> None: \"\"\"Checks the parameter metadata into the plugin decorator", "plugin {cls.__name__}\") return cls def _check_mandatory_meta(cls: Plugin) -> None: \"\"\"Checks the mandatory metadata", "\"\"\" _check_mandatory_meta(cls) if not cls.__author__: raise MetadataException(f\"Author cannot be empty on plugin {cls.__name__}\")", "and cls.__type__ not in [ value for key, value in vars(FuzzType).items() if not", "mandatory metadata into the plugin decorator @type cls: Plugin @param cls: The class", "{cls.__name__}\") return cls def _check_mandatory_meta(cls: Plugin) -> None: \"\"\"Checks the mandatory metadata into", "portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "is not None and cls.__type__ not in [ value for key, value in", "do so, subject to the following conditions: # # The above copyright notice", "(type(cls.__params__) is not dict): raise MetadataException(\"The parameters must be a \" f\"dictionary on", "cls.__params__: _check_params_meta(cls) if not cls.__desc__: raise MetadataException( f\"Description cannot be blank on plugin", "USE OR OTHER DEALINGS IN THE # SOFTWARE. from ..core.bases.base_plugin import Plugin from", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "list \" f\"on plugin {cls.__name__}\") if not cls.__params__['cli_list_separator']: raise MetadataException(\"Value of 'cli_list_separator' \"", "None: \"\"\"Checks the parameter metadata into the plugin decorator @type cls: Plugin @param", "permit persons to whom the Software is # furnished to do so, subject", "cls.__type__ not in [ value for key, value in vars(FuzzType).items() if not key.startswith(\"__\")", "Permission is hereby granted, free of charge, to any person obtaining a copy", "in ['metavar', 'type']: if key not in param_dict_keys: raise MetadataException(f\"Key {key} must be", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "if key not in param_dict_keys: raise MetadataException(f\"Key {key} must be in parameters \"", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "Software without restriction, including without limitation the rights # to use, copy, modify,", "is list: if 'cli_list_separator' not in param_dict_keys: raise MetadataException(\"The key 'cli_list_separator' must be", "value in vars(FuzzType).items() if not key.startswith(\"__\") ]: raise MetadataException( f\"Plugin type should be", "Plugin: \"\"\"Decorator to check for plugin metadata on a plugin class @type cls:", "if (type(cls.__params__) is not dict): raise MetadataException(\"The parameters must be a \" f\"dictionary", "# The above copyright notice and this permission notice shall be included in", "'cli_list_separator' must be present \" \"when parameter type is list \" f\"on plugin", "# of this software and associated documentation files (the \"Software\"), to deal #", "parameter type is list \" f\"on plugin {cls.__name__}\") if not cls.__params__['cli_list_separator']: raise MetadataException(\"Value", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "above copyright notice and this permission notice shall be included in all #", "sell # copies of the Software, and to permit persons to whom the", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\",", "f\"Plugin type should be None or a valid FuzzType on plugin {cls.__name__}\" )", "key 'cli_list_separator' must be present \" \"when parameter type is list \" f\"on", "-> None: \"\"\"Checks the mandatory metadata into the plugin decorator @type cls: Plugin", "in class_attr: raise MetadataException( f\"Metadata {meta} not specified on plugin {cls.__name__}\" ) def", "{cls.__name__}\") if cls.__params__['type'] is list: if 'cli_list_separator' not in param_dict_keys: raise MetadataException(\"The key", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "on plugin {cls.__name__}\") param_dict_keys = cls.__params__.keys() for key in ['metavar', 'type']: if key", "MetadataException(\"The key 'cli_list_separator' must be present \" \"when parameter type is list \"", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "if cls.__params__['type'] is list: if 'cli_list_separator' not in param_dict_keys: raise MetadataException(\"The key 'cli_list_separator'", "not in param_dict_keys: raise MetadataException(\"The key 'cli_list_separator' must be present \" \"when parameter", "# # Permission is hereby granted, free of charge, to any person obtaining", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "this permission notice shall be included in all # copies or substantial portions", "the mandatory metadata into the plugin decorator @type cls: Plugin @param cls: The", "\"\"\" if (type(cls.__params__) is not dict): raise MetadataException(\"The parameters must be a \"", "not in [ value for key, value in vars(FuzzType).items() if not key.startswith(\"__\") ]:", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "# copies or substantial portions of the Software. 
# # THE SOFTWARE IS", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "{cls.__name__}\") if cls.__params__: _check_params_meta(cls) if not cls.__desc__: raise MetadataException( f\"Description cannot be blank", ") if cls.__type__ is not None and cls.__type__ not in [ value for", "cls: Plugin @param cls: The class with the plugin metadata \"\"\" metadata =", "files (the \"Software\"), to deal # in the Software without restriction, including without", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "vars(cls) for meta in metadata: if meta not in class_attr: raise MetadataException( f\"Metadata", "not in class_attr: raise MetadataException( f\"Metadata {meta} not specified on plugin {cls.__name__}\" )", "Plugin) -> None: \"\"\"Checks the parameter metadata into the plugin decorator @type cls:", "..exceptions import MetadataException def plugin_meta(cls: Plugin) -> Plugin: \"\"\"Decorator to check for plugin", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "@type cls: Plugin @param cls: The class that call this decorator \"\"\" _check_mandatory_meta(cls)", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "in vars(FuzzType).items() if not key.startswith(\"__\") ]: raise MetadataException( f\"Plugin type should be None", "\"\"\"Decorator to check for plugin metadata on a plugin class @type cls: Plugin", "following conditions: # # The above copyright notice and this permission notice shall", "of the Software, and to permit persons to whom the Software is #", "on plugin {cls.__name__}\") if cls.__params__: _check_params_meta(cls) if not cls.__desc__: raise MetadataException( f\"Description cannot", "param_dict_keys: raise MetadataException(f\"Key {key} must be in parameters \" f\"dict on plugin {cls.__name__}\")", "the plugin decorator @type cls: Plugin @param cls: The class with the plugin", "plugin class @type cls: Plugin @param cls: The class that call this decorator", "-> Plugin: \"\"\"Decorator to check for plugin metadata on a plugin class @type", "plugin {cls.__name__}\") if cls.__params__['type'] is list: if 'cli_list_separator' not in param_dict_keys: raise MetadataException(\"The", "if not cls.__params__[key]: raise MetadataException(f\"Value of {key} cannot be empty in \" f\"parameters", "\"\"\"Checks the mandatory metadata into the plugin decorator @type cls: Plugin @param cls:", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "None and cls.__type__ not in [ value for key, value in vars(FuzzType).items() if", "None: \"\"\"Checks the mandatory metadata into the plugin decorator @type cls: Plugin @param", "be None or a valid FuzzType on plugin {cls.__name__}\" ) if not cls.__version__:", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "be blank on plugin {cls.__name__}\") return cls def _check_mandatory_meta(cls: Plugin) -> None: \"\"\"Checks", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "if not cls.__author__: raise MetadataException(f\"Author cannot be empty on plugin {cls.__name__}\") if cls.__params__:", "metadata on a plugin class @type cls: Plugin @param cls: The class that", "blank on plugin {cls.__name__}\") return cls def _check_mandatory_meta(cls: Plugin) -> None: \"\"\"Checks the", "\" f\"parameters dict on plugin {cls.__name__}\") if cls.__params__['type'] is 
list: if 'cli_list_separator' not", "copyright notice and this permission notice shall be included in all # copies", "key in ['metavar', 'type']: if key not in param_dict_keys: raise MetadataException(f\"Key {key} must", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "decorator \"\"\" _check_mandatory_meta(cls) if not cls.__author__: raise MetadataException(f\"Author cannot be empty on plugin", "..core.bases.base_plugin import Plugin from ..utils.consts import FuzzType from ..exceptions import MetadataException def plugin_meta(cls:", "'__type__', '__version__'] class_attr = vars(cls) for meta in metadata: if meta not in", "type is list \" f\"on plugin {cls.__name__}\") if not cls.__params__['cli_list_separator']: raise MetadataException(\"Value of", "{cls.__name__}\") param_dict_keys = cls.__params__.keys() for key in ['metavar', 'type']: if key not in", "hereby granted, free of charge, to any person obtaining a copy # of", "of this software and associated documentation files (the \"Software\"), to deal # in", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "a \" f\"dictionary on plugin {cls.__name__}\") param_dict_keys = cls.__params__.keys() for key in ['metavar',", "Plugin @param cls: The class that call this decorator \"\"\" _check_mandatory_meta(cls) if not", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "plugin metadata on a plugin class @type cls: Plugin @param cls: The class", "raise MetadataException( f\"Plugin type should be None or a valid FuzzType on plugin", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from", "THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from ..core.bases.base_plugin import Plugin", "# SOFTWARE. from ..core.bases.base_plugin import Plugin from ..utils.consts import FuzzType from ..exceptions import", "plugin {cls.__name__}\") if cls.__params__: _check_params_meta(cls) if not cls.__desc__: raise MetadataException( f\"Description cannot be", "meta not in class_attr: raise MetadataException( f\"Metadata {meta} not specified on plugin {cls.__name__}\"", "f\"on plugin {cls.__name__}\") if not cls.__params__['cli_list_separator']: raise MetadataException(\"Value of 'cli_list_separator' \" f\"cannot be", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "if 'cli_list_separator' not in param_dict_keys: raise MetadataException(\"The key 'cli_list_separator' must be present \"", "plugin {cls.__name__}\" ) def _check_params_meta(cls: Plugin) -> None: \"\"\"Checks the parameter metadata into", "The class with the plugin metadata \"\"\" if (type(cls.__params__) is not dict): raise", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "decorator @type cls: Plugin @param cls: The class with the plugin metadata \"\"\"", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this" ]
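
To make the contract concrete, here is a minimal sketch of a class that passes every check above. The plugin itself is hypothetical, and `__type__ = None` is used so the example does not depend on any particular FuzzType member:

@plugin_meta
class ExamplePlugin(Plugin):  # hypothetical plugin, for illustration only
    __author__ = "example-author"            # must be non-empty
    __params__ = {}                          # falsy, so _check_params_meta is skipped
    __desc__ = "Illustrative no-op plugin"   # must be non-blank
    __type__ = None                          # None is explicitly accepted
    __version__ = "0.1"                      # must be non-blank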
[ "Instantiate the root app root = Aurora() # Run the root app if", "import Aurora # Instantiate the root app root = Aurora() # Run the", "aurora import Aurora # Instantiate the root app root = Aurora() # Run", "<filename>app.py # Dependencies from aurora import Aurora # Instantiate the root app root", "# Instantiate the root app root = Aurora() # Run the root app", "the root app root = Aurora() # Run the root app if __name__", "from aurora import Aurora # Instantiate the root app root = Aurora() #", "# Dependencies from aurora import Aurora # Instantiate the root app root =", "Dependencies from aurora import Aurora # Instantiate the root app root = Aurora()", "app root = Aurora() # Run the root app if __name__ == '__main__':", "root app root = Aurora() # Run the root app if __name__ ==", "root = Aurora() # Run the root app if __name__ == '__main__': root.run()", "Aurora # Instantiate the root app root = Aurora() # Run the root" ]
[ "fields=\"+l\", exclude=None, ctags=\"/usr/local/bin/ctags\", creates='tags'): from os.path import join if fields is None: fields", "fields is None: fields = [] elif isinstance(fields, str): fields = [fields] fields", "\"{ctags} -R {fields} {exclude} .\".format(ctags=ctags, fields=fields, exclude=exclude) return __states__['cmd.run']( name=cmd, cwd=name, creates=join(name, creates))", "\" --exclude=\".join([\"\"] + exclude) cmd = \"{ctags} -R {fields} {exclude} .\".format(ctags=ctags, fields=fields, exclude=exclude)", "from os.path import join if fields is None: fields = [] elif isinstance(fields,", "if fields is None: fields = [] elif isinstance(fields, str): fields = [fields]", "str): exclude = [exclude] exclude = \" --exclude=\".join([\"\"] + exclude) cmd = \"{ctags}", "str): fields = [fields] fields = \" --fields=\".join([\"\"] + fields) if exclude is", "elif isinstance(fields, str): fields = [fields] fields = \" --fields=\".join([\"\"] + fields) if", "exclude = [] elif isinstance(exclude, str): exclude = [exclude] exclude = \" --exclude=\".join([\"\"]", "[] elif isinstance(exclude, str): exclude = [exclude] exclude = \" --exclude=\".join([\"\"] + exclude)", "def run(name, fields=\"+l\", exclude=None, ctags=\"/usr/local/bin/ctags\", creates='tags'): from os.path import join if fields is", "is None: fields = [] elif isinstance(fields, str): fields = [fields] fields =", "= [] elif isinstance(exclude, str): exclude = [exclude] exclude = \" --exclude=\".join([\"\"] +", "exclude = [exclude] exclude = \" --exclude=\".join([\"\"] + exclude) cmd = \"{ctags} -R", "[exclude] exclude = \" --exclude=\".join([\"\"] + exclude) cmd = \"{ctags} -R {fields} {exclude}", "[fields] fields = \" --fields=\".join([\"\"] + fields) if exclude is None: exclude =", "elif isinstance(exclude, str): exclude = [exclude] exclude = \" --exclude=\".join([\"\"] + exclude) cmd", "+ fields) if exclude is None: exclude = [] elif isinstance(exclude, str): exclude", "is None: exclude = [] elif isinstance(exclude, str): exclude = [exclude] exclude =", "None: exclude = [] elif isinstance(exclude, str): exclude = [exclude] exclude = \"", "+ exclude) cmd = \"{ctags} -R {fields} {exclude} .\".format(ctags=ctags, fields=fields, exclude=exclude) return __states__['cmd.run'](", "= \" --exclude=\".join([\"\"] + exclude) cmd = \"{ctags} -R {fields} {exclude} .\".format(ctags=ctags, fields=fields,", "= [] elif isinstance(fields, str): fields = [fields] fields = \" --fields=\".join([\"\"] +", "isinstance(exclude, str): exclude = [exclude] exclude = \" --exclude=\".join([\"\"] + exclude) cmd =", "isinstance(fields, str): fields = [fields] fields = \" --fields=\".join([\"\"] + fields) if exclude", "--exclude=\".join([\"\"] + exclude) cmd = \"{ctags} -R {fields} {exclude} .\".format(ctags=ctags, fields=fields, exclude=exclude) return", "\" --fields=\".join([\"\"] + fields) if exclude is None: exclude = [] elif isinstance(exclude,", "fields = [fields] fields = \" --fields=\".join([\"\"] + fields) if exclude is None:", "cmd = \"{ctags} -R {fields} {exclude} .\".format(ctags=ctags, fields=fields, exclude=exclude) return __states__['cmd.run']( name=cmd, cwd=name,", "exclude = \" --exclude=\".join([\"\"] + exclude) cmd = \"{ctags} -R {fields} {exclude} .\".format(ctags=ctags,", "= [exclude] exclude = \" --exclude=\".join([\"\"] + exclude) cmd = \"{ctags} -R {fields}", "= \"{ctags} -R {fields} {exclude} .\".format(ctags=ctags, fields=fields, exclude=exclude) return __states__['cmd.run']( name=cmd, cwd=name, creates=join(name,", "None: fields = 
[] elif isinstance(fields, str): fields = [fields] fields = \"", "fields = \" --fields=\".join([\"\"] + fields) if exclude is None: exclude = []", "= [fields] fields = \" --fields=\".join([\"\"] + fields) if exclude is None: exclude", "if exclude is None: exclude = [] elif isinstance(exclude, str): exclude = [exclude]", "os.path import join if fields is None: fields = [] elif isinstance(fields, str):", "--fields=\".join([\"\"] + fields) if exclude is None: exclude = [] elif isinstance(exclude, str):", "exclude is None: exclude = [] elif isinstance(exclude, str): exclude = [exclude] exclude", "ctags=\"/usr/local/bin/ctags\", creates='tags'): from os.path import join if fields is None: fields = []", "= \" --fields=\".join([\"\"] + fields) if exclude is None: exclude = [] elif", "exclude) cmd = \"{ctags} -R {fields} {exclude} .\".format(ctags=ctags, fields=fields, exclude=exclude) return __states__['cmd.run']( name=cmd,", "exclude=None, ctags=\"/usr/local/bin/ctags\", creates='tags'): from os.path import join if fields is None: fields =", "creates='tags'): from os.path import join if fields is None: fields = [] elif", "fields = [] elif isinstance(fields, str): fields = [fields] fields = \" --fields=\".join([\"\"]", "import join if fields is None: fields = [] elif isinstance(fields, str): fields", "fields) if exclude is None: exclude = [] elif isinstance(exclude, str): exclude =", "[] elif isinstance(fields, str): fields = [fields] fields = \" --fields=\".join([\"\"] + fields)", "run(name, fields=\"+l\", exclude=None, ctags=\"/usr/local/bin/ctags\", creates='tags'): from os.path import join if fields is None:", "join if fields is None: fields = [] elif isinstance(fields, str): fields =" ]
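
The two `str.join` calls above prepend a flag to every list element: joining onto a list that starts with an empty string yields `" --flag=a --flag=b"`. A small standalone sketch (field and exclude values are made up) shows the command string they produce:

fields = ["+l"]
exclude = [".git", "build"]
fields_str = " --fields=".join([""] + fields)      # ' --fields=+l'
exclude_str = " --exclude=".join([""] + exclude)   # ' --exclude=.git --exclude=build'
cmd = "{ctags} -R {fields} {exclude} .".format(
    ctags="/usr/local/bin/ctags", fields=fields_str, exclude=exclude_str)
# -> '/usr/local/bin/ctags -R  --fields=+l  --exclude=.git --exclude=build .'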
[ "use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the", "IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE", "open('requirements.txt', encoding='utf-8') as reqs_file: REQUIREMENTS = reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman manifest generator\", packages=['manifestgen'],", "setuptools import setup with open('requirements.txt', encoding='utf-8') as reqs_file: REQUIREMENTS = reqs_file.read().splitlines() setup( name='manifestgen',", "is hereby granted, free of charge, to any person obtaining a # copy", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "# copy of this software and associated documentation files (the \"Software\"), # to", "do so, subject to the following conditions: # # The above copyright notice", "notice and this permission notice shall be included # in all copies or", "files (the \"Software\"), # to deal in the Software without restriction, including without", "Hewlett Packard Enterprise Development LP # # Permission is hereby granted, free of", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "deal in the Software without restriction, including without limitation # the rights to", "rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies", "the following conditions: # # The above copyright notice and this permission notice", "NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to", "copy of this software and associated documentation files (the \"Software\"), # to deal", "THE SOFTWARE. from setuptools import setup with open('requirements.txt', encoding='utf-8') as reqs_file: REQUIREMENTS =", "to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of", "charge, to any person obtaining a # copy of this software and associated", "including without limitation # the rights to use, copy, modify, merge, publish, distribute,", "\"Software\"), # to deal in the Software without restriction, including without limitation #", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense,", "is furnished to do so, subject to the following conditions: # # The", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS", "publish, distribute, sublicense, # and/or sell copies of the Software, and to permit", "notice shall be included # in all copies or substantial portions of the", "IN THE SOFTWARE. 
from setuptools import setup with open('requirements.txt', encoding='utf-8') as reqs_file: REQUIREMENTS", "sell copies of the Software, and to permit persons to whom the #", "Copyright [2020] Hewlett Packard Enterprise Development LP # # Permission is hereby granted,", "a # copy of this software and associated documentation files (the \"Software\"), #", "as reqs_file: REQUIREMENTS = reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman manifest generator\", packages=['manifestgen'], include_package_data=True, install_requires=[REQUIREMENTS],", "reqs_file: REQUIREMENTS = reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman manifest generator\", packages=['manifestgen'], include_package_data=True, install_requires=[REQUIREMENTS], entry_points='''", "MIT License # # (C) Copyright [2020] Hewlett Packard Enterprise Development LP #", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR #", "of charge, to any person obtaining a # copy of this software and", "ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN", "to do so, subject to the following conditions: # # The above copyright", "and to permit persons to whom the # Software is furnished to do", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF", "conditions: # # The above copyright notice and this permission notice shall be", "substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT", "and this permission notice shall be included # in all copies or substantial", "DEALINGS IN THE SOFTWARE. from setuptools import setup with open('requirements.txt', encoding='utf-8') as reqs_file:", "Software, and to permit persons to whom the # Software is furnished to", "# # (C) Copyright [2020] Hewlett Packard Enterprise Development LP # # Permission", "(C) Copyright [2020] Hewlett Packard Enterprise Development LP # # Permission is hereby", "and associated documentation files (the \"Software\"), # to deal in the Software without", "associated documentation files (the \"Software\"), # to deal in the Software without restriction,", "limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, #", "or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE", "OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER", "person obtaining a # copy of this software and associated documentation files (the", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR", "in the Software without restriction, including without limitation # the rights to use,", "of the Software, and to permit persons to whom the # Software is", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. from", "# # Permission is hereby granted, free of charge, to any person obtaining", "all copies or substantial portions of the Software. 
# # THE SOFTWARE IS", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "Packard Enterprise Development LP # # Permission is hereby granted, free of charge,", "the # Software is furnished to do so, subject to the following conditions:", "documentation files (the \"Software\"), # to deal in the Software without restriction, including", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR", "SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. from setuptools", "copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software,", "SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "so, subject to the following conditions: # # The above copyright notice and", "DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR #", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS", "# The above copyright notice and this permission notice shall be included #", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "The above copyright notice and this permission notice shall be included # in", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "with open('requirements.txt', encoding='utf-8') as reqs_file: REQUIREMENTS = reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman manifest generator\",", "shall be included # in all copies or substantial portions of the Software.", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM,", "= reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman manifest generator\", packages=['manifestgen'], include_package_data=True, install_requires=[REQUIREMENTS], entry_points=''' [console_scripts] manifestgen=manifestgen.generate:main", "TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER", "to the following conditions: # # The above copyright notice and this permission", "CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH", "CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "obtaining a # copy of this software and associated documentation files (the \"Software\"),", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "distribute, sublicense, # and/or sell copies of the Software, and to permit persons", "included # in all copies or substantial portions of the Software. # #", "[2020] Hewlett Packard Enterprise Development LP # # Permission is hereby granted, free", "without restriction, including without limitation # the rights to use, copy, modify, merge,", "following conditions: # # The above copyright notice and this permission notice shall", "the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE.", "Enterprise Development LP # # Permission is hereby granted, free of charge, to", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "free of charge, to any person obtaining a # copy of this software", "granted, free of charge, to any person obtaining a # copy of this", "this software and associated documentation files (the \"Software\"), # to deal in the", "USE OR # OTHER DEALINGS IN THE SOFTWARE. from setuptools import setup with", "setup with open('requirements.txt', encoding='utf-8') as reqs_file: REQUIREMENTS = reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman manifest", "Software without restriction, including without limitation # the rights to use, copy, modify,", "# MIT License # # (C) Copyright [2020] Hewlett Packard Enterprise Development LP", "reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman manifest generator\", packages=['manifestgen'], include_package_data=True, install_requires=[REQUIREMENTS], entry_points=''' [console_scripts] manifestgen=manifestgen.generate:main '''", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "# # The above copyright notice and this permission notice shall be included", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY,", "# to deal in the Software without restriction, including without limitation # the", "permit persons to whom the # Software is furnished to do so, subject", "# and/or sell copies of the Software, and to permit persons to whom", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "copies of the Software, and to permit persons to whom the # Software", "from setuptools import setup with open('requirements.txt', encoding='utf-8') as reqs_file: REQUIREMENTS = reqs_file.read().splitlines() setup(", "restriction, including without limitation # the rights to use, copy, modify, merge, publish,", "encoding='utf-8') as reqs_file: REQUIREMENTS = reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman manifest generator\", packages=['manifestgen'], include_package_data=True,", "REQUIREMENTS = reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman manifest generator\", packages=['manifestgen'], include_package_data=True, install_requires=[REQUIREMENTS], entry_points=''' [console_scripts]", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT", "(the \"Software\"), # to deal in the Software without restriction, including without limitation", "ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "to permit persons to whom the # Software is furnished to do so,", "# Software is furnished to do so, subject to the following conditions: #", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "Permission is hereby granted, free of charge, to any person obtaining a #", "OR # OTHER DEALINGS IN THE SOFTWARE. 
from setuptools import setup with open('requirements.txt',", "the Software without restriction, including without limitation # the rights to use, copy,", "sublicense, # and/or sell copies of the Software, and to permit persons to", "# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, #", "to deal in the Software without restriction, including without limitation # the rights", "the Software, and to permit persons to whom the # Software is furnished", "# (C) Copyright [2020] Hewlett Packard Enterprise Development LP # # Permission is", "copyright notice and this permission notice shall be included # in all copies", "SOFTWARE. from setuptools import setup with open('requirements.txt', encoding='utf-8') as reqs_file: REQUIREMENTS = reqs_file.read().splitlines()", "whom the # Software is furnished to do so, subject to the following", "Software is furnished to do so, subject to the following conditions: # #", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "be included # in all copies or substantial portions of the Software. #", "OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. from setuptools import", "to whom the # Software is furnished to do so, subject to the", "License # # (C) Copyright [2020] Hewlett Packard Enterprise Development LP # #", "to any person obtaining a # copy of this software and associated documentation", "import setup with open('requirements.txt', encoding='utf-8') as reqs_file: REQUIREMENTS = reqs_file.read().splitlines() setup( name='manifestgen', description=\"Loftsman", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN", "# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "hereby granted, free of charge, to any person obtaining a # copy of", "# OTHER DEALINGS IN THE SOFTWARE. from setuptools import setup with open('requirements.txt', encoding='utf-8')", "AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "# in all copies or substantial portions of the Software. # # THE", "# the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or", "FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF", "BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN", "THE USE OR # OTHER DEALINGS IN THE SOFTWARE. from setuptools import setup", "permission notice shall be included # in all copies or substantial portions of", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "in all copies or substantial portions of the Software. # # THE SOFTWARE", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "LP # # Permission is hereby granted, free of charge, to any person", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL #", "and/or sell copies of the Software, and to permit persons to whom the", "Development LP # # Permission is hereby granted, free of charge, to any", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN", "furnished to do so, subject to the following conditions: # # The above", "any person obtaining a # copy of this software and associated documentation files", "software and associated documentation files (the \"Software\"), # to deal in the Software", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and", "# Permission is hereby granted, free of charge, to any person obtaining a", "persons to whom the # Software is furnished to do so, subject to", "this permission notice shall be included # in all copies or substantial portions", "NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "OTHER DEALINGS IN THE SOFTWARE. from setuptools import setup with open('requirements.txt', encoding='utf-8') as", "setup( name='manifestgen', description=\"Loftsman manifest generator\", packages=['manifestgen'], include_package_data=True, install_requires=[REQUIREMENTS], entry_points=''' [console_scripts] manifestgen=manifestgen.generate:main ''' )", "the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell", "subject to the following conditions: # # The above copyright notice and this", "of this software and associated documentation files (the \"Software\"), # to deal in", "above copyright notice and this permission notice shall be included # in all" ]
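
After an install (e.g. `pip install .`), the `[console_scripts]` entry point makes setuptools generate a `manifestgen` executable that imports and calls the named function; conceptually the generated wrapper is equivalent to this sketch:

# What the generated `manifestgen` console script effectively does:
import sys
from manifestgen.generate import main

if __name__ == '__main__':
    sys.exit(main())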
[ "+= os.path.basename(localPath) with open(localPath, 'rb') as fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath, fp):", "open(localPath, 'rb') as fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath, fp): print '[ERROR] upload", "timeout = 30, endpoint = upyun.ED_AUTO) except Exception, e: print '[ERROR]Login error:%s' %str(e)", "localPath: print '[ERROR]Local file %s not exists' %localPath return if remotePath.endswith('/'): remotePath +=", "self.operator.get(remotePath, fp): print '[ERROR]Download file %s failed' %remotePath return except Exception, e: print", "BUCKETNAME = UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator = None def login(self, *arg, **wargs): print", "return if remotePath.endswith('/'): remotePath += os.path.basename(localPath) with open(localPath, 'rb') as fp: try: self.operator.mkdir(os.path.dirname(remotePath))", "arg try: self.operator = upyun.UpYun(self.BUCKETNAME, user, pwd, timeout = 30, endpoint = upyun.ED_AUTO)", "upyun import os from .config import UPYUNCONFIG from .trans import TranslatorIf class UpyunCli(TranslatorIf):", "%s not exists' %localPath return if remotePath.endswith('/'): remotePath += os.path.basename(localPath) with open(localPath, 'rb')", "'wb') as fp: try: if not self.operator.get(remotePath, fp): print '[ERROR]Download file %s failed'", "if not localPath: print '[ERROR]Local file %s not exists' %localPath return if remotePath.endswith('/'):", "from .trans import TranslatorIf class UpyunCli(TranslatorIf): '''Implemented of up yun client, inhanced from", "encoding: utf-8 import upyun import os from .config import UPYUNCONFIG from .trans import", "'[INFO]Upload file %s success!' %localPath def download(self, remotePath, localPath): with open(localPath, 'wb') as", "try: self.operator = upyun.UpYun(self.BUCKETNAME, user, pwd, timeout = 30, endpoint = upyun.ED_AUTO) except", "print '[ERROR] upload file %s error' %localPath return except Exception, e: print '[ERROR]", "user, pwd = arg try: self.operator = upyun.UpYun(self.BUCKETNAME, user, pwd, timeout = 30,", "'[ERROR] upload file %s error' %localPath return except Exception, e: print '[ERROR] upload", "except Exception, e: print '[ERROR]Download file error:%s' %str(e) return print '[INFO]Download file %s", "%s error' %localPath return except Exception, e: print '[ERROR] upload file except:%s' %str(e)", "failed' %remotePath return except Exception, e: print '[ERROR]Download file error:%s' %str(e) return print", "def upload(self, localPath, remotePath): if not localPath: print '[ERROR]Local file %s not exists'", "with open(localPath, 'rb') as fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath, fp): print '[ERROR]", "Exception, e: print '[ERROR]Download file error:%s' %str(e) return print '[INFO]Download file %s success'", "login(self, *arg, **wargs): print arg user, pwd = arg try: self.operator = upyun.UpYun(self.BUCKETNAME,", "'[ERROR]Login error:%s' %str(e) return return True def upload(self, localPath, remotePath): if not localPath:", "os.path.basename(localPath) with open(localPath, 'rb') as fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath, fp): print", "file %s failed' %remotePath return except Exception, e: print '[ERROR]Download file error:%s' %str(e)", "not self.operator.put(remotePath, fp): print '[ERROR] upload file %s error' %localPath return except Exception,", "exists' %localPath return if 
remotePath.endswith('/'): remotePath += os.path.basename(localPath) with open(localPath, 'rb') as fp:", "upload file %s error' %localPath return except Exception, e: print '[ERROR] upload file", "print '[ERROR]Download file %s failed' %remotePath return except Exception, e: print '[ERROR]Download file", "= upyun.ED_AUTO) except Exception, e: print '[ERROR]Login error:%s' %str(e) return return True def", "30, endpoint = upyun.ED_AUTO) except Exception, e: print '[ERROR]Login error:%s' %str(e) return return", "TranslatorIf class UpyunCli(TranslatorIf): '''Implemented of up yun client, inhanced from Translator''' BUCKETNAME =", "localPath, remotePath): if not localPath: print '[ERROR]Local file %s not exists' %localPath return", "as fp: try: if not self.operator.get(remotePath, fp): print '[ERROR]Download file %s failed' %remotePath", "file %s success!' %localPath def download(self, remotePath, localPath): with open(localPath, 'wb') as fp:", "print arg user, pwd = arg try: self.operator = upyun.UpYun(self.BUCKETNAME, user, pwd, timeout", "UPYUNCONFIG from .trans import TranslatorIf class UpyunCli(TranslatorIf): '''Implemented of up yun client, inhanced", "endpoint = upyun.ED_AUTO) except Exception, e: print '[ERROR]Login error:%s' %str(e) return return True", "except:%s' %str(e) return print '[INFO]Upload file %s success!' %localPath def download(self, remotePath, localPath):", "download(self, remotePath, localPath): with open(localPath, 'wb') as fp: try: if not self.operator.get(remotePath, fp):", "print '[ERROR] upload file except:%s' %str(e) return print '[INFO]Upload file %s success!' %localPath", "upload(self, localPath, remotePath): if not localPath: print '[ERROR]Local file %s not exists' %localPath", "import TranslatorIf class UpyunCli(TranslatorIf): '''Implemented of up yun client, inhanced from Translator''' BUCKETNAME", "= upyun.UpYun(self.BUCKETNAME, user, pwd, timeout = 30, endpoint = upyun.ED_AUTO) except Exception, e:", "of up yun client, inhanced from Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator", "self.operator.put(remotePath, fp): print '[ERROR] upload file %s error' %localPath return except Exception, e:", "try: self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath, fp): print '[ERROR] upload file %s error' %localPath", "from .config import UPYUNCONFIG from .trans import TranslatorIf class UpyunCli(TranslatorIf): '''Implemented of up", "yun client, inhanced from Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator = None", "error' %localPath return except Exception, e: print '[ERROR] upload file except:%s' %str(e) return", "utf-8 import upyun import os from .config import UPYUNCONFIG from .trans import TranslatorIf", "remotePath += os.path.basename(localPath) with open(localPath, 'rb') as fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath,", "#!/usr/bin/env python # encoding: utf-8 import upyun import os from .config import UPYUNCONFIG", "def download(self, remotePath, localPath): with open(localPath, 'wb') as fp: try: if not self.operator.get(remotePath,", "not exists' %localPath return if remotePath.endswith('/'): remotePath += os.path.basename(localPath) with open(localPath, 'rb') as", "%localPath return except Exception, e: print '[ERROR] upload file except:%s' %str(e) return print", "= arg try: self.operator = upyun.UpYun(self.BUCKETNAME, user, pwd, timeout = 30, endpoint =", "import UPYUNCONFIG from .trans import TranslatorIf 
class UpyunCli(TranslatorIf): '''Implemented of up yun client,", "remotePath): if not localPath: print '[ERROR]Local file %s not exists' %localPath return if", "Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator = None def login(self, *arg, **wargs):", "from Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator = None def login(self, *arg,", "remotePath, localPath): with open(localPath, 'wb') as fp: try: if not self.operator.get(remotePath, fp): print", "os from .config import UPYUNCONFIG from .trans import TranslatorIf class UpyunCli(TranslatorIf): '''Implemented of", "user, pwd, timeout = 30, endpoint = upyun.ED_AUTO) except Exception, e: print '[ERROR]Login", "localPath): with open(localPath, 'wb') as fp: try: if not self.operator.get(remotePath, fp): print '[ERROR]Download", "fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath, fp): print '[ERROR] upload file %s error'", "fp: try: if not self.operator.get(remotePath, fp): print '[ERROR]Download file %s failed' %remotePath return", "%str(e) return return True def upload(self, localPath, remotePath): if not localPath: print '[ERROR]Local", "= 30, endpoint = upyun.ED_AUTO) except Exception, e: print '[ERROR]Login error:%s' %str(e) return", "print '[ERROR]Local file %s not exists' %localPath return if remotePath.endswith('/'): remotePath += os.path.basename(localPath)", "success!' %localPath def download(self, remotePath, localPath): with open(localPath, 'wb') as fp: try: if", "%localPath def download(self, remotePath, localPath): with open(localPath, 'wb') as fp: try: if not", "open(localPath, 'wb') as fp: try: if not self.operator.get(remotePath, fp): print '[ERROR]Download file %s", "UpyunCli(TranslatorIf): '''Implemented of up yun client, inhanced from Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME def", "fp): print '[ERROR]Download file %s failed' %remotePath return except Exception, e: print '[ERROR]Download", "not self.operator.get(remotePath, fp): print '[ERROR]Download file %s failed' %remotePath return except Exception, e:", "not localPath: print '[ERROR]Local file %s not exists' %localPath return if remotePath.endswith('/'): remotePath", "error:%s' %str(e) return return True def upload(self, localPath, remotePath): if not localPath: print", "file %s not exists' %localPath return if remotePath.endswith('/'): remotePath += os.path.basename(localPath) with open(localPath,", "up yun client, inhanced from Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator =", "as fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath, fp): print '[ERROR] upload file %s", "return return True def upload(self, localPath, remotePath): if not localPath: print '[ERROR]Local file", "remotePath.endswith('/'): remotePath += os.path.basename(localPath) with open(localPath, 'rb') as fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if not", "if not self.operator.get(remotePath, fp): print '[ERROR]Download file %s failed' %remotePath return except Exception,", "self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath, fp): print '[ERROR] upload file %s error' %localPath return", "pwd, timeout = 30, endpoint = upyun.ED_AUTO) except Exception, e: print '[ERROR]Login error:%s'", "<reponame>Mr8/dropup #!/usr/bin/env python # encoding: utf-8 import upyun import os from .config import", "e: print '[ERROR]Login error:%s' %str(e) return return True def 
upload(self, localPath, remotePath): if", "pwd = arg try: self.operator = upyun.UpYun(self.BUCKETNAME, user, pwd, timeout = 30, endpoint", "%s success!' %localPath def download(self, remotePath, localPath): with open(localPath, 'wb') as fp: try:", "%str(e) return print '[INFO]Upload file %s success!' %localPath def download(self, remotePath, localPath): with", "%localPath return if remotePath.endswith('/'): remotePath += os.path.basename(localPath) with open(localPath, 'rb') as fp: try:", "return except Exception, e: print '[ERROR] upload file except:%s' %str(e) return print '[INFO]Upload", "True def upload(self, localPath, remotePath): if not localPath: print '[ERROR]Local file %s not", "file except:%s' %str(e) return print '[INFO]Upload file %s success!' %localPath def download(self, remotePath,", "import upyun import os from .config import UPYUNCONFIG from .trans import TranslatorIf class", "= None def login(self, *arg, **wargs): print arg user, pwd = arg try:", "def login(self, *arg, **wargs): print arg user, pwd = arg try: self.operator =", "%remotePath return except Exception, e: print '[ERROR]Download file error:%s' %str(e) return print '[INFO]Download", "return print '[INFO]Upload file %s success!' %localPath def download(self, remotePath, localPath): with open(localPath,", "if not self.operator.put(remotePath, fp): print '[ERROR] upload file %s error' %localPath return except", "e: print '[ERROR] upload file except:%s' %str(e) return print '[INFO]Upload file %s success!'", ".config import UPYUNCONFIG from .trans import TranslatorIf class UpyunCli(TranslatorIf): '''Implemented of up yun", "__init__(self): self.operator = None def login(self, *arg, **wargs): print arg user, pwd =", "*arg, **wargs): print arg user, pwd = arg try: self.operator = upyun.UpYun(self.BUCKETNAME, user,", "arg user, pwd = arg try: self.operator = upyun.UpYun(self.BUCKETNAME, user, pwd, timeout =", "upyun.ED_AUTO) except Exception, e: print '[ERROR]Login error:%s' %str(e) return return True def upload(self,", "Exception, e: print '[ERROR]Login error:%s' %str(e) return return True def upload(self, localPath, remotePath):", "UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator = None def login(self, *arg, **wargs): print arg user,", "file %s error' %localPath return except Exception, e: print '[ERROR] upload file except:%s'", ".trans import TranslatorIf class UpyunCli(TranslatorIf): '''Implemented of up yun client, inhanced from Translator'''", "def __init__(self): self.operator = None def login(self, *arg, **wargs): print arg user, pwd", "with open(localPath, 'wb') as fp: try: if not self.operator.get(remotePath, fp): print '[ERROR]Download file", "self.operator = None def login(self, *arg, **wargs): print arg user, pwd = arg", "print '[ERROR]Login error:%s' %str(e) return return True def upload(self, localPath, remotePath): if not", "return except Exception, e: print '[ERROR]Download file error:%s' %str(e) return print '[INFO]Download file", "return True def upload(self, localPath, remotePath): if not localPath: print '[ERROR]Local file %s", "'rb') as fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if not self.operator.put(remotePath, fp): print '[ERROR] upload file", "fp): print '[ERROR] upload file %s error' %localPath return except Exception, e: print", "'[ERROR]Download file %s failed' %remotePath return except Exception, e: print '[ERROR]Download file error:%s'", "upload file except:%s' %str(e) return print '[INFO]Upload file %s success!' 
%localPath def download(self,", "e: print '[ERROR]Download file error:%s' %str(e) return print '[INFO]Download file %s success' %remotePath", "import os from .config import UPYUNCONFIG from .trans import TranslatorIf class UpyunCli(TranslatorIf): '''Implemented", "%s failed' %remotePath return except Exception, e: print '[ERROR]Download file error:%s' %str(e) return", "inhanced from Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator = None def login(self,", "'[ERROR]Local file %s not exists' %localPath return if remotePath.endswith('/'): remotePath += os.path.basename(localPath) with", "'''Implemented of up yun client, inhanced from Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME def __init__(self):", "class UpyunCli(TranslatorIf): '''Implemented of up yun client, inhanced from Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME", "client, inhanced from Translator''' BUCKETNAME = UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator = None def", "if remotePath.endswith('/'): remotePath += os.path.basename(localPath) with open(localPath, 'rb') as fp: try: self.operator.mkdir(os.path.dirname(remotePath)) if", "= UPYUNCONFIG.BUCKETNAME def __init__(self): self.operator = None def login(self, *arg, **wargs): print arg", "except Exception, e: print '[ERROR]Login error:%s' %str(e) return return True def upload(self, localPath,", "try: if not self.operator.get(remotePath, fp): print '[ERROR]Download file %s failed' %remotePath return except", "'[ERROR] upload file except:%s' %str(e) return print '[INFO]Upload file %s success!' %localPath def", "except Exception, e: print '[ERROR] upload file except:%s' %str(e) return print '[INFO]Upload file", "print '[INFO]Upload file %s success!' %localPath def download(self, remotePath, localPath): with open(localPath, 'wb')", "**wargs): print arg user, pwd = arg try: self.operator = upyun.UpYun(self.BUCKETNAME, user, pwd,", "# encoding: utf-8 import upyun import os from .config import UPYUNCONFIG from .trans", "Exception, e: print '[ERROR] upload file except:%s' %str(e) return print '[INFO]Upload file %s", "None def login(self, *arg, **wargs): print arg user, pwd = arg try: self.operator", "python # encoding: utf-8 import upyun import os from .config import UPYUNCONFIG from", "upyun.UpYun(self.BUCKETNAME, user, pwd, timeout = 30, endpoint = upyun.ED_AUTO) except Exception, e: print", "self.operator = upyun.UpYun(self.BUCKETNAME, user, pwd, timeout = 30, endpoint = upyun.ED_AUTO) except Exception," ]
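A short driver showing how the class above is meant to be used (credentials and paths are placeholders; note the trailing '/' on the remote path, which makes upload() append the local basename):

# Hypothetical usage of UpyunCli; bucket credentials are placeholders.
cli = UpyunCli()
if cli.login('operator-name', 'operator-password'):
    cli.upload('./photo.jpg', '/backups/')             # stored as /backups/photo.jpg
    cli.download('/backups/photo.jpg', './photo2.jpg')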
[ "from flask import Flask app = Flask(__name__) @app.route(\"/\") def home(): \"\"\"\"View for the", "page of the Website\"\"\" return \"Welcome to the HomePage!\" @app.route('/square/<int:number>') def show_square(number): \"\"\"View", "the Home page of the Website\"\"\" return \"Welcome to the HomePage!\" @app.route('/square/<int:number>') def", "for the Home page of the Website\"\"\" return \"Welcome to the HomePage!\" @app.route('/square/<int:number>')", "demonstrate Dynamic Routing\"\"\" from flask import Flask app = Flask(__name__) @app.route(\"/\") def home():", "import Flask app = Flask(__name__) @app.route(\"/\") def home(): \"\"\"\"View for the Home page", "Dynamic Routing\"\"\" from flask import Flask app = Flask(__name__) @app.route(\"/\") def home(): \"\"\"\"View", "home(): \"\"\"\"View for the Home page of the Website\"\"\" return \"Welcome to the", "\"\"\"\"View for the Home page of the Website\"\"\" return \"Welcome to the HomePage!\"", "Website\"\"\" return \"Welcome to the HomePage!\" @app.route('/square/<int:number>') def show_square(number): \"\"\"View that shows the", "Flask(__name__) @app.route(\"/\") def home(): \"\"\"\"View for the Home page of the Website\"\"\" return", "@app.route('/square/<int:number>') def show_square(number): \"\"\"View that shows the square of the number passed by", "the square of the number passed by URL\"\"\" return f\"Square of {str(number)} is:", "passed by URL\"\"\" return f\"Square of {str(number)} is: {(number * number)}\" if __name__", "shows the square of the number passed by URL\"\"\" return f\"Square of {str(number)}", "Flask app = Flask(__name__) @app.route(\"/\") def home(): \"\"\"\"View for the Home page of", "\"Welcome to the HomePage!\" @app.route('/square/<int:number>') def show_square(number): \"\"\"View that shows the square of", "\"\"\"An example application to demonstrate Dynamic Routing\"\"\" from flask import Flask app =", "the HomePage!\" @app.route('/square/<int:number>') def show_square(number): \"\"\"View that shows the square of the number", "show_square(number): \"\"\"View that shows the square of the number passed by URL\"\"\" return", "def home(): \"\"\"\"View for the Home page of the Website\"\"\" return \"Welcome to", "of the Website\"\"\" return \"Welcome to the HomePage!\" @app.route('/square/<int:number>') def show_square(number): \"\"\"View that", "to demonstrate Dynamic Routing\"\"\" from flask import Flask app = Flask(__name__) @app.route(\"/\") def", "return \"Welcome to the HomePage!\" @app.route('/square/<int:number>') def show_square(number): \"\"\"View that shows the square", "def show_square(number): \"\"\"View that shows the square of the number passed by URL\"\"\"", "return f\"Square of {str(number)} is: {(number * number)}\" if __name__ == '__main__': app.run(debug=True)", "@app.route(\"/\") def home(): \"\"\"\"View for the Home page of the Website\"\"\" return \"Welcome", "HomePage!\" @app.route('/square/<int:number>') def show_square(number): \"\"\"View that shows the square of the number passed", "Routing\"\"\" from flask import Flask app = Flask(__name__) @app.route(\"/\") def home(): \"\"\"\"View for", "Home page of the Website\"\"\" return \"Welcome to the HomePage!\" @app.route('/square/<int:number>') def show_square(number):", "application to demonstrate Dynamic Routing\"\"\" from flask import Flask app = Flask(__name__) @app.route(\"/\")", "number passed by URL\"\"\" return f\"Square of {str(number)} is: {(number * number)}\" if", "that shows the square of the number passed by URL\"\"\" 
return f\"Square of", "by URL\"\"\" return f\"Square of {str(number)} is: {(number * number)}\" if __name__ ==", "to the HomePage!\" @app.route('/square/<int:number>') def show_square(number): \"\"\"View that shows the square of the", "\"\"\"View that shows the square of the number passed by URL\"\"\" return f\"Square", "square of the number passed by URL\"\"\" return f\"Square of {str(number)} is: {(number", "flask import Flask app = Flask(__name__) @app.route(\"/\") def home(): \"\"\"\"View for the Home", "the number passed by URL\"\"\" return f\"Square of {str(number)} is: {(number * number)}\"", "the Website\"\"\" return \"Welcome to the HomePage!\" @app.route('/square/<int:number>') def show_square(number): \"\"\"View that shows", "of the number passed by URL\"\"\" return f\"Square of {str(number)} is: {(number *", "app = Flask(__name__) @app.route(\"/\") def home(): \"\"\"\"View for the Home page of the", "= Flask(__name__) @app.route(\"/\") def home(): \"\"\"\"View for the Home page of the Website\"\"\"", "example application to demonstrate Dynamic Routing\"\"\" from flask import Flask app = Flask(__name__)", "URL\"\"\" return f\"Square of {str(number)} is: {(number * number)}\" if __name__ == '__main__':" ]
[ "!= 0: return msg = data['d'] try: if data['t'] == 'MESSAGE_CREATE': if 'content'", "msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if 'nick' in msg['member'] else None, )) elif data['t'] ==", "None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if 'nick' in msg['member'] else None, )) elif", "parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if 'nick'", "hasattr(self, 'ahttp'): self.ahttp = aiohttp.ClientSession() self.db = await db.Db().connect(self.loop) async def on_socket_response(self, data):", "if msg['attachments'] else None, msg['id'], )) elif data['t'] == 'MESSAGE_DELETE': await self.db.execute(''' UPDATE", "import parse from datetime import datetime from logitch import config, db class Client(discord.Client):", "created_at), message, attachments FROM discord_entries WHERE id=%s; ''', (msg['id'],) ) await self.db.execute(''' UPDATE", "bot=config['discord']['bot']) if __name__ == '__main__': from logitch import config_load, logger config_load() logger.set_logger('discord.log') main()", "%s, %s, %s, %s, %s, %s); ''', ( msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'],", "logging.exception('on_socket_response') def main(): bot = Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__ == '__main__': from", "= aiohttp.ClientSession() self.db = await db.Db().connect(self.loop) async def on_socket_response(self, data): if data['op'] !=", "msg: return if msg['type'] != 0: return await self.db.execute(''' INSERT INTO discord_entry_versions (entry_id,", "0: return await self.db.execute(''' INSERT INTO discord_entry_versions (entry_id, created_at, message, attachments) SELECT id,", "else None, msg['id'], )) elif data['t'] == 'MESSAGE_DELETE': await self.db.execute(''' UPDATE discord_entries SET", "msg['type'] != 0: return await self.db.execute(''' INSERT INTO discord_entries (id, server_id, channel_id, created_at,", "SET updated_at=%s, message=%s, attachments=%s WHERE id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments']", "pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging, asyncio, json, aiohttp from dateutil.parser import parse", "%s, %s, %s, %s, %s, %s, %s, %s); ''', ( msg['id'], msg['guild_id'], msg['channel_id'],", "updated_at=%s, message=%s, attachments=%s WHERE id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else", "import logging, asyncio, json, aiohttp from dateutil.parser import parse from datetime import datetime", "attachments FROM discord_entries WHERE id=%s; ''', (msg['id'],) ) await self.db.execute(''' UPDATE discord_entries SET", "main(): bot = Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__ == '__main__': from logitch import", "if 'content' not in msg: return if msg['type'] != 0: return await self.db.execute('''", "'content' not in msg: return if msg['type'] != 0: return await self.db.execute(''' INSERT", "https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging, asyncio, json, aiohttp from dateutil.parser import parse from datetime", "not in msg: return if 
msg['type'] != 0: return await self.db.execute(''' INSERT INTO", "''', ( msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['author']['username'],", "!= 0: return await self.db.execute(''' INSERT INTO discord_entries (id, server_id, channel_id, created_at, message,", "SET deleted=\"Y\", deleted_at=%s WHERE id=%s; ''', (datetime.utcnow(), msg['id'],) ) except: logging.exception('on_socket_response') def main():", "return if msg['type'] != 0: return await self.db.execute(''' INSERT INTO discord_entry_versions (entry_id, created_at,", "user_discriminator, member_nick) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);", "%s, %s, %s, %s); ''', ( msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if", "bot = Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__ == '__main__': from logitch import config_load,", "except: logging.exception('on_socket_response') def main(): bot = Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__ == '__main__':", "WHERE id=%s; ''', (msg['id'],) ) await self.db.execute(''' UPDATE discord_entries SET updated_at=%s, message=%s, attachments=%s", "await self.db.execute(''' UPDATE discord_entries SET updated_at=%s, message=%s, attachments=%s WHERE id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None),", "data['t'] == 'MESSAGE_UPDATE': if 'content' not in msg: return if msg['type'] != 0:", "return await self.db.execute(''' INSERT INTO discord_entries (id, server_id, channel_id, created_at, message, attachments, user,", "attachments, user, user_id, user_discriminator, member_nick) VALUES (%s, %s, %s, %s, %s, %s, %s,", "( msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['author']['username'], msg['author']['id'],", "(msg['id'],) ) await self.db.execute(''' UPDATE discord_entries SET updated_at=%s, message=%s, attachments=%s WHERE id=%s; ''',", "data['t'] == 'MESSAGE_CREATE': if 'content' not in msg: return if msg['type'] != 0:", "from datetime import datetime from logitch import config, db class Client(discord.Client): async def", "UPDATE discord_entries SET updated_at=%s, message=%s, attachments=%s WHERE id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments'])", "deleted_at=%s WHERE id=%s; ''', (datetime.utcnow(), msg['id'],) ) except: logging.exception('on_socket_response') def main(): bot =", "return if msg['type'] != 0: return await self.db.execute(''' INSERT INTO discord_entries (id, server_id,", "data['op'] != 0: return msg = data['d'] try: if data['t'] == 'MESSAGE_CREATE': if", "parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['id'], )) elif data['t'] == 'MESSAGE_DELETE':", "''') import logging, asyncio, json, aiohttp from dateutil.parser import parse from datetime import", "parse from datetime import datetime from logitch import config, db class Client(discord.Client): async", "%s, %s); ''', ( msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else", "db class Client(discord.Client): async def 
on_connect(self): if not hasattr(self, 'ahttp'): self.ahttp = aiohttp.ClientSession()", "dateutil.parser import parse from datetime import datetime from logitch import config, db class", "self.db = await db.Db().connect(self.loop) async def on_socket_response(self, data): if data['op'] != 0: return", "installed manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging, asyncio, json, aiohttp from dateutil.parser", "(datetime.utcnow(), msg['id'],) ) except: logging.exception('on_socket_response') def main(): bot = Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if", "SELECT id, ifnull(updated_at, created_at), message, attachments FROM discord_entries WHERE id=%s; ''', (msg['id'],) )", "await self.db.execute(''' INSERT INTO discord_entry_versions (entry_id, created_at, message, attachments) SELECT id, ifnull(updated_at, created_at),", "msg['id'], )) elif data['t'] == 'MESSAGE_DELETE': await self.db.execute(''' UPDATE discord_entries SET deleted=\"Y\", deleted_at=%s", ") except: logging.exception('on_socket_response') def main(): bot = Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__ ==", "'nick' in msg['member'] else None, )) elif data['t'] == 'MESSAGE_UPDATE': if 'content' not", "await self.db.execute(''' UPDATE discord_entries SET deleted=\"Y\", deleted_at=%s WHERE id=%s; ''', (datetime.utcnow(), msg['id'],) )", "attachments) SELECT id, ifnull(updated_at, created_at), message, attachments FROM discord_entries WHERE id=%s; ''', (msg['id'],)", "msg['id'],) ) except: logging.exception('on_socket_response') def main(): bot = Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__", ")) elif data['t'] == 'MESSAGE_DELETE': await self.db.execute(''' UPDATE discord_entries SET deleted=\"Y\", deleted_at=%s WHERE", "WHERE id=%s; ''', (datetime.utcnow(), msg['id'],) ) except: logging.exception('on_socket_response') def main(): bot = Client()", "INTO discord_entries (id, server_id, channel_id, created_at, message, attachments, user, user_id, user_discriminator, member_nick) VALUES", "return await self.db.execute(''' INSERT INTO discord_entry_versions (entry_id, created_at, message, attachments) SELECT id, ifnull(updated_at,", "server_id, channel_id, created_at, message, attachments, user, user_id, user_discriminator, member_nick) VALUES (%s, %s, %s,", "discord_entries SET updated_at=%s, message=%s, attachments=%s WHERE id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if", "%s, %s, %s, %s, %s, %s, %s, %s, %s); ''', ( msg['id'], msg['guild_id'],", "install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging, asyncio, json, aiohttp from dateutil.parser import parse from", "user_id, user_discriminator, member_nick) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s,", "msg['type'] != 0: return await self.db.execute(''' INSERT INTO discord_entry_versions (entry_id, created_at, message, attachments)", ") await self.db.execute(''' UPDATE discord_entries SET updated_at=%s, message=%s, attachments=%s WHERE id=%s; ''', (", "def on_connect(self): if not hasattr(self, 'ahttp'): self.ahttp = aiohttp.ClientSession() self.db = await db.Db().connect(self.loop)", "data['d'] try: if data['t'] == 'MESSAGE_CREATE': if 'content' not in msg: return if", "id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, 
msg['id'], )) elif", "not hasattr(self, 'ahttp'): self.ahttp = aiohttp.ClientSession() self.db = await db.Db().connect(self.loop) async def on_socket_response(self,", "msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'],", "if not hasattr(self, 'ahttp'): self.ahttp = aiohttp.ClientSession() self.db = await db.Db().connect(self.loop) async def", ")) elif data['t'] == 'MESSAGE_UPDATE': if 'content' not in msg: return if msg['type']", "try: import discord except ImportError: raise Exception(''' The discord libary must be installed", "if msg['type'] != 0: return await self.db.execute(''' INSERT INTO discord_entries (id, server_id, channel_id,", "== 'MESSAGE_CREATE': if 'content' not in msg: return if msg['type'] != 0: return", "id, ifnull(updated_at, created_at), message, attachments FROM discord_entries WHERE id=%s; ''', (msg['id'],) ) await", "asyncio, json, aiohttp from dateutil.parser import parse from datetime import datetime from logitch", "== 'MESSAGE_UPDATE': if 'content' not in msg: return if msg['type'] != 0: return", "UPDATE discord_entries SET deleted=\"Y\", deleted_at=%s WHERE id=%s; ''', (datetime.utcnow(), msg['id'],) ) except: logging.exception('on_socket_response')", "message=%s, attachments=%s WHERE id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None,", "try: if data['t'] == 'MESSAGE_CREATE': if 'content' not in msg: return if msg['type']", "!= 0: return await self.db.execute(''' INSERT INTO discord_entry_versions (entry_id, created_at, message, attachments) SELECT", "Exception(''' The discord libary must be installed manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import", "msg['attachments'] else None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if 'nick' in msg['member'] else None,", "(id, server_id, channel_id, created_at, message, attachments, user, user_id, user_discriminator, member_nick) VALUES (%s, %s,", "Client(discord.Client): async def on_connect(self): if not hasattr(self, 'ahttp'): self.ahttp = aiohttp.ClientSession() self.db =", "def on_socket_response(self, data): if data['op'] != 0: return msg = data['d'] try: if", "raise Exception(''' The discord libary must be installed manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''')", "ImportError: raise Exception(''' The discord libary must be installed manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip", "data): if data['op'] != 0: return msg = data['d'] try: if data['t'] ==", "msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick']", "def main(): bot = Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__ == '__main__': from logitch", "json.dumps(msg['attachments']) if msg['attachments'] else None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if 'nick' in msg['member']", "(entry_id, created_at, message, attachments) SELECT id, ifnull(updated_at, created_at), message, attachments FROM 
discord_entries WHERE", "''', (msg['id'],) ) await self.db.execute(''' UPDATE discord_entries SET updated_at=%s, message=%s, attachments=%s WHERE id=%s;", "msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if", "discord libary must be installed manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging, asyncio,", "else None, )) elif data['t'] == 'MESSAGE_UPDATE': if 'content' not in msg: return", "class Client(discord.Client): async def on_connect(self): if not hasattr(self, 'ahttp'): self.ahttp = aiohttp.ClientSession() self.db", "0: return await self.db.execute(''' INSERT INTO discord_entries (id, server_id, channel_id, created_at, message, attachments,", "'MESSAGE_CREATE': if 'content' not in msg: return if msg['type'] != 0: return await", "attachments=%s WHERE id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['id'],", "datetime from logitch import config, db class Client(discord.Client): async def on_connect(self): if not", "data['t'] == 'MESSAGE_DELETE': await self.db.execute(''' UPDATE discord_entries SET deleted=\"Y\", deleted_at=%s WHERE id=%s; ''',", "self.db.execute(''' INSERT INTO discord_entry_versions (entry_id, created_at, message, attachments) SELECT id, ifnull(updated_at, created_at), message,", "discord_entries SET deleted=\"Y\", deleted_at=%s WHERE id=%s; ''', (datetime.utcnow(), msg['id'],) ) except: logging.exception('on_socket_response') def", "Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__ == '__main__': from logitch import config_load, logger config_load()", "self.db.execute(''' INSERT INTO discord_entries (id, server_id, channel_id, created_at, message, attachments, user, user_id, user_discriminator,", "import discord except ImportError: raise Exception(''' The discord libary must be installed manually:", "= Client() bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__ == '__main__': from logitch import config_load, logger", "datetime import datetime from logitch import config, db class Client(discord.Client): async def on_connect(self):", "libary must be installed manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging, asyncio, json,", "id=%s; ''', (msg['id'],) ) await self.db.execute(''' UPDATE discord_entries SET updated_at=%s, message=%s, attachments=%s WHERE", "id=%s; ''', (datetime.utcnow(), msg['id'],) ) except: logging.exception('on_socket_response') def main(): bot = Client() bot.run(config['discord']['token'],", "async def on_connect(self): if not hasattr(self, 'ahttp'): self.ahttp = aiohttp.ClientSession() self.db = await", "%s, %s, %s, %s, %s); ''', ( msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments'])", "message, attachments, user, user_id, user_discriminator, member_nick) VALUES (%s, %s, %s, %s, %s, %s,", "discord except ImportError: raise Exception(''' The discord libary must be installed manually: pip", "bot.run(config['discord']['token'], bot=config['discord']['bot']) if __name__ == '__main__': from logitch import config_load, logger config_load() logger.set_logger('discord.log')", "msg: return if msg['type'] != 0: return 
await self.db.execute(''' INSERT INTO discord_entries (id,", "channel_id, created_at, message, attachments, user, user_id, user_discriminator, member_nick) VALUES (%s, %s, %s, %s,", "on_socket_response(self, data): if data['op'] != 0: return msg = data['d'] try: if data['t']", "INTO discord_entry_versions (entry_id, created_at, message, attachments) SELECT id, ifnull(updated_at, created_at), message, attachments FROM", "in msg: return if msg['type'] != 0: return await self.db.execute(''' INSERT INTO discord_entry_versions", "must be installed manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging, asyncio, json, aiohttp", "None, )) elif data['t'] == 'MESSAGE_UPDATE': if 'content' not in msg: return if", "elif data['t'] == 'MESSAGE_DELETE': await self.db.execute(''' UPDATE discord_entries SET deleted=\"Y\", deleted_at=%s WHERE id=%s;", "INSERT INTO discord_entries (id, server_id, channel_id, created_at, message, attachments, user, user_id, user_discriminator, member_nick)", "%s, %s, %s, %s, %s, %s, %s); ''', ( msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None),", "msg['member']['nick'] if 'nick' in msg['member'] else None, )) elif data['t'] == 'MESSAGE_UPDATE': if", "msg['member'] else None, )) elif data['t'] == 'MESSAGE_UPDATE': if 'content' not in msg:", "json.dumps(msg['attachments']) if msg['attachments'] else None, msg['id'], )) elif data['t'] == 'MESSAGE_DELETE': await self.db.execute('''", "msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['id'], )) elif data['t'] == 'MESSAGE_DELETE': await", "INSERT INTO discord_entry_versions (entry_id, created_at, message, attachments) SELECT id, ifnull(updated_at, created_at), message, attachments", "user, user_id, user_discriminator, member_nick) VALUES (%s, %s, %s, %s, %s, %s, %s, %s,", "on_connect(self): if not hasattr(self, 'ahttp'): self.ahttp = aiohttp.ClientSession() self.db = await db.Db().connect(self.loop) async", "None, msg['id'], )) elif data['t'] == 'MESSAGE_DELETE': await self.db.execute(''' UPDATE discord_entries SET deleted=\"Y\",", "be installed manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging, asyncio, json, aiohttp from", "created_at, message, attachments, user, user_id, user_discriminator, member_nick) VALUES (%s, %s, %s, %s, %s,", "== 'MESSAGE_DELETE': await self.db.execute(''' UPDATE discord_entries SET deleted=\"Y\", deleted_at=%s WHERE id=%s; ''', (datetime.utcnow(),", "in msg['member'] else None, )) elif data['t'] == 'MESSAGE_UPDATE': if 'content' not in", "config, db class Client(discord.Client): async def on_connect(self): if not hasattr(self, 'ahttp'): self.ahttp =", "async def on_socket_response(self, data): if data['op'] != 0: return msg = data['d'] try:", "created_at, message, attachments) SELECT id, ifnull(updated_at, created_at), message, attachments FROM discord_entries WHERE id=%s;", "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s); ''', (", "%s); ''', ( msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None,", "'MESSAGE_DELETE': await self.db.execute(''' UPDATE discord_entries SET deleted=\"Y\", deleted_at=%s WHERE id=%s; ''', (datetime.utcnow(), msg['id'],)", "aiohttp from dateutil.parser import parse from datetime import datetime from logitch import config,", "import config, db class Client(discord.Client): async def 
on_connect(self): if not hasattr(self, 'ahttp'): self.ahttp", "0: return msg = data['d'] try: if data['t'] == 'MESSAGE_CREATE': if 'content' not", "ifnull(updated_at, created_at), message, attachments FROM discord_entries WHERE id=%s; ''', (msg['id'],) ) await self.db.execute('''", "FROM discord_entries WHERE id=%s; ''', (msg['id'],) ) await self.db.execute(''' UPDATE discord_entries SET updated_at=%s,", "in msg: return if msg['type'] != 0: return await self.db.execute(''' INSERT INTO discord_entries", "WHERE id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['id'], ))", "message, attachments) SELECT id, ifnull(updated_at, created_at), message, attachments FROM discord_entries WHERE id=%s; ''',", "except ImportError: raise Exception(''' The discord libary must be installed manually: pip install", "from dateutil.parser import parse from datetime import datetime from logitch import config, db", "msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if 'nick' in msg['member'] else None, )) elif data['t']", "self.db.execute(''' UPDATE discord_entries SET updated_at=%s, message=%s, attachments=%s WHERE id=%s; ''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'],", "from logitch import config, db class Client(discord.Client): async def on_connect(self): if not hasattr(self,", "aiohttp.ClientSession() self.db = await db.Db().connect(self.loop) async def on_socket_response(self, data): if data['op'] != 0:", "( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['id'], )) elif data['t'] ==", "''', (datetime.utcnow(), msg['id'],) ) except: logging.exception('on_socket_response') def main(): bot = Client() bot.run(config['discord']['token'], bot=config['discord']['bot'])", "if data['t'] == 'MESSAGE_CREATE': if 'content' not in msg: return if msg['type'] !=", "msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if 'nick' in", "msg['author']['discriminator'], msg['member']['nick'] if 'nick' in msg['member'] else None, )) elif data['t'] == 'MESSAGE_UPDATE':", "import datetime from logitch import config, db class Client(discord.Client): async def on_connect(self): if", "if msg['attachments'] else None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if 'nick' in msg['member'] else", "await db.Db().connect(self.loop) async def on_socket_response(self, data): if data['op'] != 0: return msg =", "= await db.Db().connect(self.loop) async def on_socket_response(self, data): if data['op'] != 0: return msg", "deleted=\"Y\", deleted_at=%s WHERE id=%s; ''', (datetime.utcnow(), msg['id'],) ) except: logging.exception('on_socket_response') def main(): bot", "member_nick) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s); ''',", "if 'nick' in msg['member'] else None, )) elif data['t'] == 'MESSAGE_UPDATE': if 'content'", "manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging, asyncio, json, aiohttp from dateutil.parser import", "db.Db().connect(self.loop) async def on_socket_response(self, data): if data['op'] != 0: return msg = data['d']", "json, aiohttp from dateutil.parser import parse from datetime import datetime from logitch import", 
"discord_entries WHERE id=%s; ''', (msg['id'],) ) await self.db.execute(''' UPDATE discord_entries SET updated_at=%s, message=%s,", "= data['d'] try: if data['t'] == 'MESSAGE_CREATE': if 'content' not in msg: return", "logitch import config, db class Client(discord.Client): async def on_connect(self): if not hasattr(self, 'ahttp'):", "elif data['t'] == 'MESSAGE_UPDATE': if 'content' not in msg: return if msg['type'] !=", "'ahttp'): self.ahttp = aiohttp.ClientSession() self.db = await db.Db().connect(self.loop) async def on_socket_response(self, data): if", "else None, msg['author']['username'], msg['author']['id'], msg['author']['discriminator'], msg['member']['nick'] if 'nick' in msg['member'] else None, ))", "%s, %s, %s); ''', ( msg['id'], msg['guild_id'], msg['channel_id'], parse(msg['timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments']", "msg['attachments'] else None, msg['id'], )) elif data['t'] == 'MESSAGE_DELETE': await self.db.execute(''' UPDATE discord_entries", "logging, asyncio, json, aiohttp from dateutil.parser import parse from datetime import datetime from", "''', ( parse(msg['edited_timestamp']).replace(tzinfo=None), msg['content'], json.dumps(msg['attachments']) if msg['attachments'] else None, msg['id'], )) elif data['t']", "return msg = data['d'] try: if data['t'] == 'MESSAGE_CREATE': if 'content' not in", "await self.db.execute(''' INSERT INTO discord_entries (id, server_id, channel_id, created_at, message, attachments, user, user_id,", "discord_entries (id, server_id, channel_id, created_at, message, attachments, user, user_id, user_discriminator, member_nick) VALUES (%s,", "self.db.execute(''' UPDATE discord_entries SET deleted=\"Y\", deleted_at=%s WHERE id=%s; ''', (datetime.utcnow(), msg['id'],) ) except:", "message, attachments FROM discord_entries WHERE id=%s; ''', (msg['id'],) ) await self.db.execute(''' UPDATE discord_entries", "self.ahttp = aiohttp.ClientSession() self.db = await db.Db().connect(self.loop) async def on_socket_response(self, data): if data['op']", "if data['op'] != 0: return msg = data['d'] try: if data['t'] == 'MESSAGE_CREATE':", "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s); ''', ( msg['id'],", "discord_entry_versions (entry_id, created_at, message, attachments) SELECT id, ifnull(updated_at, created_at), message, attachments FROM discord_entries", "if msg['type'] != 0: return await self.db.execute(''' INSERT INTO discord_entry_versions (entry_id, created_at, message,", "The discord libary must be installed manually: pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip ''') import logging,", "'MESSAGE_UPDATE': if 'content' not in msg: return if msg['type'] != 0: return await", "msg = data['d'] try: if data['t'] == 'MESSAGE_CREATE': if 'content' not in msg:" ]
[ "import * from Package.PackageBase import * from Packager.PackagerBase import * from Package.VirtualPackageBase import", "from Package.PackageBase import * from Packager.PackagerBase import * from Package.VirtualPackageBase import * from", "class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides a base class for pip packages\"\"\" def __init__(self):", "* from Source.MultiSource import * class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides a base class", "* class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides a base class for pip packages\"\"\" def", "\"\"\"provides a base class for pip packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if", "base class for pip packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__", "self.__class__.__bases__ += (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from PackagerBase def createPackage(self): return True", "PipBuildSystem, PackagerBase): \"\"\"provides a base class for pip packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\")", "PackagerBase): \"\"\"provides a base class for pip packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self)", "from Source.MultiSource import * class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides a base class for", "from Packager.PackagerBase import * from Package.VirtualPackageBase import * from Source.MultiSource import * class", "packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self) else:", "pip packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self)", "+= (MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__ += (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from PackagerBase", "else: self.__class__.__bases__ += (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from PackagerBase def createPackage(self): return", "called\") PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__ += (VirtualPackageBase,) VirtualPackageBase.__init__(self)", "Package.VirtualPackageBase import * from Source.MultiSource import * class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides a", "import * from Source.MultiSource import * class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides a base", "__init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__ +=", "PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__ 
+= (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self)", "(VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from PackagerBase def createPackage(self): return True def preArchive(self):", "PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from PackagerBase def createPackage(self): return True def preArchive(self): return True", "from BuildSystem.PipBuildSystem import * from Package.PackageBase import * from Packager.PackagerBase import * from", "Source.MultiSource import * class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides a base class for pip", "(MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__ += (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from PackagerBase def", "* from Package.VirtualPackageBase import * from Source.MultiSource import * class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase):", "BuildSystem.PipBuildSystem import * from Package.PackageBase import * from Packager.PackagerBase import * from Package.VirtualPackageBase", "CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__ += (VirtualPackageBase,)", "from Package.VirtualPackageBase import * from Source.MultiSource import * class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides", "for pip packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,)", "* from Packager.PackagerBase import * from Package.VirtualPackageBase import * from Source.MultiSource import *", "+= (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from PackagerBase def createPackage(self): return True def", "MultiSource.__init__(self) else: self.__class__.__bases__ += (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from PackagerBase def createPackage(self):", "self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__ += (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) #", "Packager.PackagerBase import * from Package.VirtualPackageBase import * from Source.MultiSource import * class PipPackageBase(PackageBase,", "Package.PackageBase import * from Packager.PackagerBase import * from Package.VirtualPackageBase import * from Source.MultiSource", "if self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__ += (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self)", "VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from PackagerBase def createPackage(self): return True def preArchive(self): return", "class for pip packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__ +=", "import * from Packager.PackagerBase import * from Package.VirtualPackageBase import * from 
Source.MultiSource import", "import * class PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides a base class for pip packages\"\"\"", "a base class for pip packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if self.subinfo.svnTarget():", "def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__ called\") PackageBase.__init__(self) if self.subinfo.svnTarget(): self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__", "* from Package.PackageBase import * from Packager.PackagerBase import * from Package.VirtualPackageBase import *", "PipPackageBase(PackageBase, PipBuildSystem, PackagerBase): \"\"\"provides a base class for pip packages\"\"\" def __init__(self): CraftCore.log.debug(\"PipPackageBase.__init__", "import * from Package.VirtualPackageBase import * from Source.MultiSource import * class PipPackageBase(PackageBase, PipBuildSystem,", "self.__class__.__bases__ += (MultiSource,) MultiSource.__init__(self) else: self.__class__.__bases__ += (VirtualPackageBase,) VirtualPackageBase.__init__(self) PipBuildSystem.__init__(self) PackagerBase.__init__(self) # from" ]
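The unusual part of PipPackageBase is that `__init__` mutates `self.__class__.__bases__` to graft on either MultiSource or VirtualPackageBase at runtime. A self-contained toy (names invented, not Craft's) showing the same trick and its main caveat, namely that the mutation changes the class itself and therefore every instance:

# Toy demonstration of runtime base-class injection via __bases__.
class Greeter:
    def greet(self):
        return 'hello'

class LoudMixin:
    def shout(self):
        return self.greet().upper() + '!'

class Speaker(Greeter):
    def __init__(self):
        # Guard so repeated construction doesn't keep appending the mixin;
        # the mutation affects the class, not just this instance.
        if LoudMixin not in self.__class__.__bases__:
            self.__class__.__bases__ += (LoudMixin,)

print(Speaker().shout())   # HELLO!
print(Speaker.__bases__)   # now (Greeter, LoudMixin)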
[ "in the base model print(\"Number of layers in the base model: \", len(base_model.layers))", "plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) # Create the base", "tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image, _ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image[0] for", "# Let's take a look to see how many layers are in the", "batch_size=BATCH_SIZE, image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names = train_dataset.class_names plt.figure(figsize=(10, 10))", "// 5) print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches:", "as plt import numpy as np import os import tensorflow as tf from", "160) train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE)", "Freeze all the layers before the `fine_tune_at` layer for layer in base_model.layers[:fine_tune_at]: layer.trainable", "image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for", "= test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image, _ in train_dataset.take(1):", "the base model from the pre-trained model MobileNet V2 IMG_SHAPE = IMG_SIZE +", "from the pre-trained model MobileNet V2 IMG_SHAPE = IMG_SIZE + (3,) base_model =", "import os import tensorflow as tf from tensorflow.keras.preprocessing import image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'", "5) validation_dataset = validation_dataset.skip(val_batches // 5) print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))", "train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image[0] for i in range(9): ax = plt.subplot(3,", "plt.imshow(augmented_image[0] / 255) plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) #", "-1) # Create the base model from the pre-trained model MobileNet V2 IMG_SHAPE", "os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation') BATCH_SIZE = 32 IMG_SIZE = (160, 160)", "image[0] for i in range(9): ax = plt.subplot(3, 3, i + 1) augmented_image", "image_batch, label_batch = next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape) base_model.trainable = True # Let's", "AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation", "feature_batch = base_model(image_batch) print(feature_batch.shape) base_model.trainable = True # Let's take a look to", "all the layers before the `fine_tune_at` layer for layer in base_model.layers[:fine_tune_at]: 
layer.trainable =", "batches: %d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)", "tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image, _ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image[0]", "print(\"Number of layers in the base model: \", len(base_model.layers)) # Fine-tune from this", "plt import numpy as np import os import tensorflow as tf from tensorflow.keras.preprocessing", "= data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255) plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5,", "len(base_model.layers)) # Fine-tune from this layer onwards fine_tune_at = 100 # Freeze all", "plt.subplot(3, 3, i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255) plt.axis('off')", "data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image, _ in train_dataset.take(1): plt.figure(figsize=(10, 10))", "MobileNet V2 IMG_SHAPE = IMG_SIZE + (3,) base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch =", "# Fine-tune from this layer onwards fine_tune_at = 100 # Freeze all the", "onwards fine_tune_at = 100 # Freeze all the layers before the `fine_tune_at` layer", "numpy as np import os import tensorflow as tf from tensorflow.keras.preprocessing import image_dataset_from_directory", "plt.figure(figsize=(10, 10)) for images, labels in train_dataset.take(1): for i in range(9): ax =", "in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches", "image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')", "the base model: \", len(base_model.layers)) # Fine-tune from this layer onwards fine_tune_at =", "class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels in train_dataset.take(1): for i in", "%d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE", "of test batches: %d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset", "% tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset =", "for image, _ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image[0] for i in", "layer onwards fine_tune_at = 100 # Freeze all the layers before the `fine_tune_at`", "<reponame>BrunoDatoMeneses/TensorFlowTutorials import matplotlib.pyplot as plt import numpy as np import os import tensorflow", "= 
tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) # Create the base model from", "import numpy as np import os import tensorflow as tf from tensorflow.keras.preprocessing import", "_ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image[0] for i in range(9): ax", "255) plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) # Create the", "base model print(\"Number of layers in the base model: \", len(base_model.layers)) # Fine-tune", "shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels in train_dataset.take(1):", "+ 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches // 5)", "layers in the base model: \", len(base_model.layers)) # Fine-tune from this layer onwards", "image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels in", "'train') validation_dir = os.path.join(PATH, 'validation') BATCH_SIZE = 32 IMG_SIZE = (160, 160) train_dataset", "tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE train_dataset =", "batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE =", "base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch = next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape) base_model.trainable =", "plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset =", "for i in range(9): ax = plt.subplot(3, 3, i + 1) augmented_image =", "train_dataset.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype(\"uint8\"))", "os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation') BATCH_SIZE = 32", "3, i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255) plt.axis('off') preprocess_input", "model: \", len(base_model.layers)) # Fine-tune from this layer onwards fine_tune_at = 100 #", "validation_dir = os.path.join(PATH, 'validation') BATCH_SIZE = 32 IMG_SIZE = (160, 160) train_dataset =", "_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir", "5) print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches: %d'", "plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches 
// 5) validation_dataset = validation_dataset.skip(val_batches //", "the layers before the `fine_tune_at` layer for layer in base_model.layers[:fine_tune_at]: layer.trainable = False", "from tensorflow.keras.preprocessing import image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH", "see how many layers are in the base model print(\"Number of layers in", "images, labels in train_dataset.take(1): for i in range(9): ax = plt.subplot(3, 3, i", "ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset)", "import matplotlib.pyplot as plt import numpy as np import os import tensorflow as", "extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation')", "augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255) plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale =", "as np import os import tensorflow as tf from tensorflow.keras.preprocessing import image_dataset_from_directory _URL", "= base_model(image_batch) print(feature_batch.shape) base_model.trainable = True # Let's take a look to see", "= validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image,", "train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),", "batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels in train_dataset.take(1): for", "= tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image, _ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image", "= 32 IMG_SIZE = (160, 160) train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) validation_dataset", "plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches // 5) validation_dataset = validation_dataset.skip(val_batches", "in range(9): ax = plt.subplot(3, 3, i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0))", "BATCH_SIZE = 32 IMG_SIZE = (160, 160) train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE)", "layers are in the base model print(\"Number of layers in the base model:", "/ 255) plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) # Create", "train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, 
image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names", "matplotlib.pyplot as plt import numpy as np import os import tensorflow as tf", "i + 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches //", "this layer onwards fine_tune_at = 100 # Freeze all the layers before the", "0)) plt.imshow(augmented_image[0] / 255) plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1)", "100 # Freeze all the layers before the `fine_tune_at` layer for layer in", "test_dataset = validation_dataset.take(val_batches // 5) validation_dataset = validation_dataset.skip(val_batches // 5) print('Number of validation", "take a look to see how many layers are in the base model", "model MobileNet V2 IMG_SHAPE = IMG_SIZE + (3,) base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch", "= IMG_SIZE + (3,) base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch = next(iter(train_dataset)) feature_batch =", "32 IMG_SIZE = (160, 160) train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) validation_dataset =", "tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)", "plt.figure(figsize=(10, 10)) first_image = image[0] for i in range(9): ax = plt.subplot(3, 3,", "= train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels in train_dataset.take(1): for i in range(9):", "next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape) base_model.trainable = True # Let's take a look", "+ (3,) base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch = next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape)", "to see how many layers are in the base model print(\"Number of layers", "import image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip),", "tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) # Create the base model from the pre-trained model MobileNet", "base model from the pre-trained model MobileNet V2 IMG_SHAPE = IMG_SIZE + (3,)", "validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for images,", "pre-trained model MobileNet V2 IMG_SHAPE = IMG_SIZE + (3,) base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch,", "tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 
'cats_and_dogs_filtered') train_dir = os.path.join(PATH, 'train') validation_dir =", "'validation') BATCH_SIZE = 32 IMG_SIZE = (160, 160) train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE,", "tf from tensorflow.keras.preprocessing import image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)", "= tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir = os.path.join(PATH, 'train') validation_dir", "label_batch = next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape) base_model.trainable = True # Let's take", "many layers are in the base model print(\"Number of layers in the base", "= image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names =", "3, i + 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches", "= image[0] for i in range(9): ax = plt.subplot(3, 3, i + 1)", "V2 IMG_SHAPE = IMG_SIZE + (3,) base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch = next(iter(train_dataset))", "plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches // 5) validation_dataset =", "first_image = image[0] for i in range(9): ax = plt.subplot(3, 3, i +", "how many layers are in the base model print(\"Number of layers in the", "% tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE train_dataset", "range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches =", "IMG_SIZE + (3,) base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch = next(iter(train_dataset)) feature_batch = base_model(image_batch)", "a look to see how many layers are in the base model print(\"Number", "tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image, _ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image =", "the base model print(\"Number of layers in the base model: \", len(base_model.layers)) #", "image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names = train_dataset.class_names", "labels in train_dataset.take(1): for i in range(9): ax = plt.subplot(3, 3, i +", "val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches // 5) validation_dataset = validation_dataset.skip(val_batches // 5)", "of 
validation batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))", "= tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) # Create the base model from the pre-trained model", "+ 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255) plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input", "\", len(base_model.layers)) # Fine-tune from this layer onwards fine_tune_at = 100 # Freeze", "(3,) base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch = next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape) base_model.trainable", "'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir = os.path.join(PATH,", "base model: \", len(base_model.layers)) # Fine-tune from this layer onwards fine_tune_at = 100", "model print(\"Number of layers in the base model: \", len(base_model.layers)) # Fine-tune from", "= 100 # Freeze all the layers before the `fine_tune_at` layer for layer", "ax = plt.subplot(3, 3, i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] /", "1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255) plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale", "test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image, _ in", "image, _ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image[0] for i in range(9):", "i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\")", "validation_dataset.take(val_batches // 5) validation_dataset = validation_dataset.skip(val_batches // 5) print('Number of validation batches: %d'", "= validation_dataset.skip(val_batches // 5) print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of", "= os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation') BATCH_SIZE = 32 IMG_SIZE = (160,", "test batches: %d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset =", "Create the base model from the pre-trained model MobileNet V2 IMG_SHAPE = IMG_SIZE", "= True # Let's take a look to see how many layers are", "= os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation') BATCH_SIZE =", "tensorflow as tf from tensorflow.keras.preprocessing import image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip',", "model from the pre-trained model MobileNet V2 IMG_SHAPE = IMG_SIZE + (3,) base_model", "range(9): ax = plt.subplot(3, 3, i + 1) augmented_image 
= data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0]", "tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) # Create the base model from the", "base_model(image_batch) print(feature_batch.shape) base_model.trainable = True # Let's take a look to see how", "import tensorflow as tf from tensorflow.keras.preprocessing import image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip =", "= os.path.join(PATH, 'validation') BATCH_SIZE = 32 IMG_SIZE = (160, 160) train_dataset = image_dataset_from_directory(train_dir,", "test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image, _ in train_dataset.take(1): plt.figure(figsize=(10,", "image_size=IMG_SIZE) class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels in train_dataset.take(1): for i", "validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for image, _", "i in range(9): ax = plt.subplot(3, 3, i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image,", "validation_dataset.skip(val_batches // 5) print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of test", "IMG_SHAPE = IMG_SIZE + (3,) base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch = next(iter(train_dataset)) feature_batch", "os import tensorflow as tf from tensorflow.keras.preprocessing import image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip", "]) for image, _ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image[0] for i", "= next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape) base_model.trainable = True # Let's take a", "path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir = os.path.join(PATH, 'train')", "validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ]) for", "train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels in train_dataset.take(1): for i in range(9): ax", "tensorflow.keras.preprocessing import image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH =", "# Freeze all the layers before the `fine_tune_at` layer for layer in base_model.layers[:fine_tune_at]:", "PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation') BATCH_SIZE", "for i in range(9): 
ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]])", "fine_tune_at = 100 # Freeze all the layers before the `fine_tune_at` layer for", "data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255) plt.axis('off') preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset=", "print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)", "in the base model: \", len(base_model.layers)) # Fine-tune from this layer onwards fine_tune_at", "= plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset", "train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2), ])", "= image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels", "= plt.subplot(3, 3, i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255)", "= tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches // 5) validation_dataset = validation_dataset.skip(val_batches // 5) print('Number", "preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input #rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) # Create the base model", "// 5) validation_dataset = validation_dataset.skip(val_batches // 5) print('Number of validation batches: %d' %", "Fine-tune from this layer onwards fine_tune_at = 100 # Freeze all the layers", "tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches // 5) validation_dataset = validation_dataset.skip(val_batches // 5) print('Number of", "10)) for images, labels in train_dataset.take(1): for i in range(9): ax = plt.subplot(3,", "validation batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE", "from this layer onwards fine_tune_at = 100 # Freeze all the layers before", "i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image[0] / 255) plt.axis('off') preprocess_input =", "offset= -1) # Create the base model from the pre-trained model MobileNet V2", "print(feature_batch.shape) base_model.trainable = True # Let's take a look to see how many", "# Create the base model from the pre-trained model MobileNet V2 IMG_SHAPE =", "tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([", "the pre-trained model MobileNet V2 IMG_SHAPE = IMG_SIZE + (3,) base_model = 
tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF')", "1) plt.imshow(images[i].numpy().astype(\"uint8\")) plt.title(class_names[labels[i]]) plt.axis(\"off\") val_batches = tf.data.experimental.cardinality(validation_dataset) test_dataset = validation_dataset.take(val_batches // 5) validation_dataset", "validation_dataset = validation_dataset.skip(val_batches // 5) print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number", "print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset)) print('Number of test batches: %d' %", "for images, labels in train_dataset.take(1): for i in range(9): ax = plt.subplot(3, 3,", "are in the base model print(\"Number of layers in the base model: \",", "= 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir =", "base_model.trainable = True # Let's take a look to see how many layers", "#rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1) # Create the base model from the pre-trained", "of layers in the base model: \", len(base_model.layers)) # Fine-tune from this layer", "np import os import tensorflow as tf from tensorflow.keras.preprocessing import image_dataset_from_directory _URL =", "in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image[0] for i in range(9): ax =", "= validation_dataset.take(val_batches // 5) validation_dataset = validation_dataset.skip(val_batches // 5) print('Number of validation batches:", "os.path.join(PATH, 'validation') BATCH_SIZE = 32 IMG_SIZE = (160, 160) train_dataset = image_dataset_from_directory(train_dir, shuffle=True,", "Let's take a look to see how many layers are in the base", "%d' % tf.data.experimental.cardinality(test_dataset)) AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset", "in train_dataset.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1)", "= tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation =", "tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch = next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape) base_model.trainable = True #", "train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation') BATCH_SIZE = 32 IMG_SIZE =", "origin=_URL, extract=True) PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered') train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH,", "10)) first_image = image[0] for i in range(9): ax = plt.subplot(3, 3, i", "= tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF') image_batch, label_batch = next(iter(train_dataset)) feature_batch = base_model(image_batch) print(feature_batch.shape) base_model.trainable = True", "look to see how many layers are in the base model print(\"Number of", "= (160, 160) train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, 
image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True,", "(160, 160) train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE,", "as tf from tensorflow.keras.preprocessing import image_dataset_from_directory _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL,", "shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) class_names = train_dataset.class_names plt.figure(figsize=(10,", "True # Let's take a look to see how many layers are in", "'cats_and_dogs_filtered') train_dir = os.path.join(PATH, 'train') validation_dir = os.path.join(PATH, 'validation') BATCH_SIZE = 32 IMG_SIZE", "= train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE) data_augmentation = tf.keras.Sequential([ tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),", "IMG_SIZE = (160, 160) train_dataset = image_dataset_from_directory(train_dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE) validation_dataset = image_dataset_from_directory(validation_dir," ]
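The script ends right after freezing the first 100 layers. A typical continuation, not present in the source, recompiles with a small learning rate so the unfrozen top layers can adapt without wrecking the pre-trained weights. A sketch, assuming the loaded model already ends in a single-logit binary head:

base_model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-5),  # low LR for fine-tuning
                   loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                   metrics=['accuracy'])

history_fine = base_model.fit(train_dataset,
                              epochs=10,
                              validation_data=validation_dataset)
print(base_model.evaluate(test_dataset))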
[ "from . import misc from .db_api.json_db import Database from .notify_admins import on_startup_notify from", "import misc from .db_api.json_db import Database from .notify_admins import on_startup_notify from .voice_recognition import", ".db_api.json_db import Database from .notify_admins import on_startup_notify from .voice_recognition import voice_to_text from .pages", ". import misc from .db_api.json_db import Database from .notify_admins import on_startup_notify from .voice_recognition", "from .db_api.json_db import Database from .notify_admins import on_startup_notify from .voice_recognition import voice_to_text from", "import Database from .notify_admins import on_startup_notify from .voice_recognition import voice_to_text from .pages import", "<filename>utils/__init__.py from . import misc from .db_api.json_db import Database from .notify_admins import on_startup_notify", "misc from .db_api.json_db import Database from .notify_admins import on_startup_notify from .voice_recognition import voice_to_text", "Database from .notify_admins import on_startup_notify from .voice_recognition import voice_to_text from .pages import *" ]
[ "Created on Mon Apr 12 13:21:44 2021 @author: hiszm \"\"\" from sys import", "on Mon Apr 12 13:21:44 2021 @author: hiszm \"\"\" from sys import argv", "-*- coding: utf-8 -*- \"\"\" Created on Mon Apr 12 13:21:44 2021 @author:", "filename) print (txt.read()) print (\"Type the filename again:\") file_again = input (\"> \")", "print (\"Here's your file %r:\" % filename) print (txt.read()) print (\"Type the filename", "(\"Here's your file %r:\" % filename) print (txt.read()) print (\"Type the filename again:\")", "txt = open (filename) print (\"Here's your file %r:\" % filename) print (txt.read())", "-*- \"\"\" Created on Mon Apr 12 13:21:44 2021 @author: hiszm \"\"\" from", "%r:\" % filename) print (txt.read()) print (\"Type the filename again:\") file_again = input", "print (txt.read()) print (\"Type the filename again:\") file_again = input (\"> \") txt_again", "(txt.read()) print (\"Type the filename again:\") file_again = input (\"> \") txt_again =", "from sys import argv script,filename = argv txt = open (filename) print (\"Here's", "\"\"\" from sys import argv script,filename = argv txt = open (filename) print", "argv txt = open (filename) print (\"Here's your file %r:\" % filename) print", "your file %r:\" % filename) print (txt.read()) print (\"Type the filename again:\") file_again", "utf-8 -*- \"\"\" Created on Mon Apr 12 13:21:44 2021 @author: hiszm \"\"\"", "filename again:\") file_again = input (\"> \") txt_again = open (file_again) print (txt_again.read())", "argv script,filename = argv txt = open (filename) print (\"Here's your file %r:\"", "script,filename = argv txt = open (filename) print (\"Here's your file %r:\" %", "open (filename) print (\"Here's your file %r:\" % filename) print (txt.read()) print (\"Type", "sys import argv script,filename = argv txt = open (filename) print (\"Here's your", "% filename) print (txt.read()) print (\"Type the filename again:\") file_again = input (\">", "@author: hiszm \"\"\" from sys import argv script,filename = argv txt = open", "13:21:44 2021 @author: hiszm \"\"\" from sys import argv script,filename = argv txt", "= open (filename) print (\"Here's your file %r:\" % filename) print (txt.read()) print", "hiszm \"\"\" from sys import argv script,filename = argv txt = open (filename)", "(\"Type the filename again:\") file_again = input (\"> \") txt_again = open (file_again)", "= argv txt = open (filename) print (\"Here's your file %r:\" % filename)", "\"\"\" Created on Mon Apr 12 13:21:44 2021 @author: hiszm \"\"\" from sys", "file %r:\" % filename) print (txt.read()) print (\"Type the filename again:\") file_again =", "12 13:21:44 2021 @author: hiszm \"\"\" from sys import argv script,filename = argv", "(filename) print (\"Here's your file %r:\" % filename) print (txt.read()) print (\"Type the", "print (\"Type the filename again:\") file_again = input (\"> \") txt_again = open", "2021 @author: hiszm \"\"\" from sys import argv script,filename = argv txt =", "coding: utf-8 -*- \"\"\" Created on Mon Apr 12 13:21:44 2021 @author: hiszm", "Apr 12 13:21:44 2021 @author: hiszm \"\"\" from sys import argv script,filename =", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Apr 12 13:21:44 2021", "the filename again:\") file_again = input (\"> \") txt_again = open (file_again) print", "import argv script,filename = argv txt = open (filename) print (\"Here's your file", "Mon Apr 12 13:21:44 2021 @author: hiszm \"\"\" from sys import argv script,filename" ]
[ "string for the array ''' if not isinstance(a, _np.ndarray): raise TypeError(\"An ndarray expected.", "= ['ndarray_repr'] def ndarray_repr(a): '''Gets a one-line representation string for a numpy array.", "''' if not isinstance(a, _np.ndarray): raise TypeError(\"An ndarray expected. Got '{}'.\".format(type(a))) if a.size", "------- str a short representation string for the array ''' if not isinstance(a,", "array.''' import numpy as _np __all__ = ['ndarray_repr'] def ndarray_repr(a): '''Gets a one-line", "---------- a : numpy.ndarray input numpy array Returns ------- str a short representation", "__all__ = ['ndarray_repr'] def ndarray_repr(a): '''Gets a one-line representation string for a numpy", "input numpy array Returns ------- str a short representation string for the array", "for the array ''' if not isinstance(a, _np.ndarray): raise TypeError(\"An ndarray expected. Got", "a : numpy.ndarray input numpy array Returns ------- str a short representation string", "short representation string for the array ''' if not isinstance(a, _np.ndarray): raise TypeError(\"An", "a numpy array. Parameters ---------- a : numpy.ndarray input numpy array Returns -------", "20: return \"ndarray(shape={}, dtype={}, min={}, max={}, mean={}, std={})\".format(a.shape, a.dtype, a.min(), a.max(), a.mean(), a.std())", "_np __all__ = ['ndarray_repr'] def ndarray_repr(a): '''Gets a one-line representation string for a", "not isinstance(a, _np.ndarray): raise TypeError(\"An ndarray expected. Got '{}'.\".format(type(a))) if a.size > 20:", "numpy.ndarray input numpy array Returns ------- str a short representation string for the", "if not isinstance(a, _np.ndarray): raise TypeError(\"An ndarray expected. Got '{}'.\".format(type(a))) if a.size >", "\"ndarray(shape={}, dtype={}, min={}, max={}, mean={}, std={})\".format(a.shape, a.dtype, a.min(), a.max(), a.mean(), a.std()) return \"ndarray({},", "dtype={}, min={}, max={}, mean={}, std={})\".format(a.shape, a.dtype, a.min(), a.max(), a.mean(), a.std()) return \"ndarray({}, dtype={})\".format(repr(a.tolist()),", "string for a numpy array. Parameters ---------- a : numpy.ndarray input numpy array", "numpy array Returns ------- str a short representation string for the array '''", "return \"ndarray(shape={}, dtype={}, min={}, max={}, mean={}, std={})\".format(a.shape, a.dtype, a.min(), a.max(), a.mean(), a.std()) return", "array. Parameters ---------- a : numpy.ndarray input numpy array Returns ------- str a", "min={}, max={}, mean={}, std={})\".format(a.shape, a.dtype, a.min(), a.max(), a.mean(), a.std()) return \"ndarray({}, dtype={})\".format(repr(a.tolist()), a.dtype)", "ndarray expected. Got '{}'.\".format(type(a))) if a.size > 20: return \"ndarray(shape={}, dtype={}, min={}, max={},", "Parameters ---------- a : numpy.ndarray input numpy array Returns ------- str a short", "'{}'.\".format(type(a))) if a.size > 20: return \"ndarray(shape={}, dtype={}, min={}, max={}, mean={}, std={})\".format(a.shape, a.dtype,", "ndarray_repr(a): '''Gets a one-line representation string for a numpy array. Parameters ---------- a", "as _np __all__ = ['ndarray_repr'] def ndarray_repr(a): '''Gets a one-line representation string for", ": numpy.ndarray input numpy array Returns ------- str a short representation string for", "'''Gets a one-line representation string for a numpy array. Parameters ---------- a :", "numpy array. 
Parameters ---------- a : numpy.ndarray input numpy array Returns ------- str", "str a short representation string for the array ''' if not isinstance(a, _np.ndarray):", "array ''' if not isinstance(a, _np.ndarray): raise TypeError(\"An ndarray expected. Got '{}'.\".format(type(a))) if", "a one-line representation string for a numpy array. Parameters ---------- a : numpy.ndarray", "functions dealing with numpy array.''' import numpy as _np __all__ = ['ndarray_repr'] def", "the array ''' if not isinstance(a, _np.ndarray): raise TypeError(\"An ndarray expected. Got '{}'.\".format(type(a)))", "numpy array.''' import numpy as _np __all__ = ['ndarray_repr'] def ndarray_repr(a): '''Gets a", "if a.size > 20: return \"ndarray(shape={}, dtype={}, min={}, max={}, mean={}, std={})\".format(a.shape, a.dtype, a.min(),", "> 20: return \"ndarray(shape={}, dtype={}, min={}, max={}, mean={}, std={})\".format(a.shape, a.dtype, a.min(), a.max(), a.mean(),", "TypeError(\"An ndarray expected. Got '{}'.\".format(type(a))) if a.size > 20: return \"ndarray(shape={}, dtype={}, min={},", "import numpy as _np __all__ = ['ndarray_repr'] def ndarray_repr(a): '''Gets a one-line representation", "representation string for a numpy array. Parameters ---------- a : numpy.ndarray input numpy", "<gh_stars>0 '''Useful functions dealing with numpy array.''' import numpy as _np __all__ =", "for a numpy array. Parameters ---------- a : numpy.ndarray input numpy array Returns", "'''Useful functions dealing with numpy array.''' import numpy as _np __all__ = ['ndarray_repr']", "raise TypeError(\"An ndarray expected. Got '{}'.\".format(type(a))) if a.size > 20: return \"ndarray(shape={}, dtype={},", "dealing with numpy array.''' import numpy as _np __all__ = ['ndarray_repr'] def ndarray_repr(a):", "expected. Got '{}'.\".format(type(a))) if a.size > 20: return \"ndarray(shape={}, dtype={}, min={}, max={}, mean={},", "a.size > 20: return \"ndarray(shape={}, dtype={}, min={}, max={}, mean={}, std={})\".format(a.shape, a.dtype, a.min(), a.max(),", "one-line representation string for a numpy array. Parameters ---------- a : numpy.ndarray input", "def ndarray_repr(a): '''Gets a one-line representation string for a numpy array. Parameters ----------", "a short representation string for the array ''' if not isinstance(a, _np.ndarray): raise", "with numpy array.''' import numpy as _np __all__ = ['ndarray_repr'] def ndarray_repr(a): '''Gets", "array Returns ------- str a short representation string for the array ''' if", "representation string for the array ''' if not isinstance(a, _np.ndarray): raise TypeError(\"An ndarray", "isinstance(a, _np.ndarray): raise TypeError(\"An ndarray expected. Got '{}'.\".format(type(a))) if a.size > 20: return", "numpy as _np __all__ = ['ndarray_repr'] def ndarray_repr(a): '''Gets a one-line representation string", "_np.ndarray): raise TypeError(\"An ndarray expected. Got '{}'.\".format(type(a))) if a.size > 20: return \"ndarray(shape={},", "Returns ------- str a short representation string for the array ''' if not", "Got '{}'.\".format(type(a))) if a.size > 20: return \"ndarray(shape={}, dtype={}, min={}, max={}, mean={}, std={})\".format(a.shape,", "['ndarray_repr'] def ndarray_repr(a): '''Gets a one-line representation string for a numpy array. Parameters" ]
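For illustration, the two branches behave as follows (the integer dtype printed is platform-dependent):

import numpy as np

print(ndarray_repr(np.arange(3)))
# ndarray([0, 1, 2], dtype=int64)   <- size <= 20: full element repr
print(ndarray_repr(np.zeros((5, 5))))
# ndarray(shape=(5, 5), dtype=float64, min=0.0, max=0.0, mean=0.0, std=0.0)
#                                    <- size > 20: summary statistics only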
[ "= normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid) def", "''' input_path = os.path.abspath(input_path) output_path = os.path.abspath(output_path) for root, _, files in os.walk(input_path,", "import shape_scale_padding, \\ shapenet_path, shapenet_normalized_path import os from multiprocessing import Pool from tools.utils", "output_path): ''' Normalize *.obj file recursively :param input_path: :param output_path: :return: ''' input_path", "size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid) def normalize(obj_path): ''' normalize shapes", "ShapeNet object path :return: ''' cat, obj_file = obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path, cat),", "output_path = os.path.abspath(output_path) for root, _, files in os.walk(input_path, topdown=True): for file in", "= obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file,", "recursively :param input_path: :param output_path: :return: ''' input_path = os.path.abspath(input_path) output_path = os.path.abspath(output_path)", "''' cat, obj_file = obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i') output_cat_dir =", "import os from multiprocessing import Pool from tools.utils import append_dir, normalize_obj_file from tools.read_and_write", "in files: input_file_path = os.path.join(root, file) output_file_path = input_file_path.replace(input_path, output_path) if not os.path.exists(os.path.dirname(output_file_path)):", "os.path.abspath(output_path) for root, _, files in os.walk(input_path, topdown=True): for file in files: input_file_path", "= os.path.abspath(input_path) output_path = os.path.abspath(output_path) for root, _, files in os.walk(input_path, topdown=True): for", "os.path.join(root, file) output_file_path = input_file_path.replace(input_path, output_path) if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not file.endswith('.obj'):", "import Pool from tools.utils import append_dir, normalize_obj_file from tools.read_and_write import write_json, load_data_path from", "if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects = load_data_path(shapenet_path) p = Pool(processes=cpu_cores) p.map(normalize, all_objects) p.close()", "if __name__ == '__main__': if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects = load_data_path(shapenet_path) p =", "file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue else: # write obj file size_centroid_file", "os.path.exists(size_centroid_file): continue total_size, centroid = normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid = {'size': total_size.tolist(), 'centroid':", "for file in files: input_file_path = os.path.join(root, file) output_file_path = input_file_path.replace(input_path, output_path) if", "continue total_size, centroid = normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()}", ":param 
input_path: :param output_path: :return: ''' input_path = os.path.abspath(input_path) output_path = os.path.abspath(output_path) for", "shapenet_path, shapenet_normalized_path import os from multiprocessing import Pool from tools.utils import append_dir, normalize_obj_file", "os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue total_size, centroid = normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid = {'size':", "output_file_path, padding=shape_scale_padding) size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid) def normalize(obj_path): '''", ":return: ''' input_path = os.path.abspath(input_path) output_path = os.path.abspath(output_path) for root, _, files in", "shapenet obj files to [-0.5, 0.5]^3. author: ynie date: Jan, 2020 ''' import", "sys sys.path.append('.') from data_config import shape_scale_padding, \\ shapenet_path, shapenet_normalized_path import os from multiprocessing", "write_json, load_data_path from settings import cpu_cores def recursive_normalize(input_path, output_path): ''' Normalize *.obj file", "continue else: # write obj file size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json' if os.path.exists(output_file_path)", "os.path.abspath(input_path) output_path = os.path.abspath(output_path) for root, _, files in os.walk(input_path, topdown=True): for file", "input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o') recursive_normalize(input_cat_dir,", "'o') recursive_normalize(input_cat_dir, output_cat_dir) if __name__ == '__main__': if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects =", "= os.path.abspath(output_path) for root, _, files in os.walk(input_path, topdown=True): for file in files:", "'_size_centroid.json' if os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue total_size, centroid = normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid", "date: Jan, 2020 ''' import sys sys.path.append('.') from data_config import shape_scale_padding, \\ shapenet_path,", "normalize_obj_file from tools.read_and_write import write_json, load_data_path from settings import cpu_cores def recursive_normalize(input_path, output_path):", "ynie date: Jan, 2020 ''' import sys sys.path.append('.') from data_config import shape_scale_padding, \\", "normalize shapes :param obj_path: ShapeNet object path :return: ''' cat, obj_file = obj_path.split('/')[3:5]", "files: input_file_path = os.path.join(root, file) output_file_path = input_file_path.replace(input_path, output_path) if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path))", "input_path: :param output_path: :return: ''' input_path = os.path.abspath(input_path) output_path = os.path.abspath(output_path) for root,", "from data_config import shape_scale_padding, \\ shapenet_path, shapenet_normalized_path import os from multiprocessing import Pool", "output_path) if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path)", "write obj file size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + 
'_size_centroid.json' if os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue", "obj_path: ShapeNet object path :return: ''' cat, obj_file = obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path,", "2020 ''' import sys sys.path.append('.') from data_config import shape_scale_padding, \\ shapenet_path, shapenet_normalized_path import", "Normalize shapenet obj files to [-0.5, 0.5]^3. author: ynie date: Jan, 2020 '''", "os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue else: # write obj file size_centroid_file = '.'.join(output_file_path.split('.')[:-1])", "import append_dir, normalize_obj_file from tools.read_and_write import write_json, load_data_path from settings import cpu_cores def", "file size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json' if os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue total_size, centroid", "write_json(size_centroid_file, size_centroid) def normalize(obj_path): ''' normalize shapes :param obj_path: ShapeNet object path :return:", "tools.read_and_write import write_json, load_data_path from settings import cpu_cores def recursive_normalize(input_path, output_path): ''' Normalize", "tools.utils import append_dir, normalize_obj_file from tools.read_and_write import write_json, load_data_path from settings import cpu_cores", "= '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json' if os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue total_size, centroid = normalize_obj_file(input_file_path,", "= input_file_path.replace(input_path, output_path) if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path)", ":param obj_path: ShapeNet object path :return: ''' cat, obj_file = obj_path.split('/')[3:5] input_cat_dir =", "normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid) def normalize(obj_path):", "0.5]^3. 
author: ynie date: Jan, 2020 ''' import sys sys.path.append('.') from data_config import", "import sys sys.path.append('.') from data_config import shape_scale_padding, \\ shapenet_path, shapenet_normalized_path import os from", "append_dir(os.path.join(shapenet_path, cat), obj_file, 'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir) if", "if os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue total_size, centroid = normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid =", "and os.path.exists(size_centroid_file): continue total_size, centroid = normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid = {'size': total_size.tolist(),", "data_config import shape_scale_padding, \\ shapenet_path, shapenet_normalized_path import os from multiprocessing import Pool from", "append_dir, normalize_obj_file from tools.read_and_write import write_json, load_data_path from settings import cpu_cores def recursive_normalize(input_path,", "= append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir) if __name__ == '__main__': if not", "not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue else:", "shapes :param obj_path: ShapeNet object path :return: ''' cat, obj_file = obj_path.split('/')[3:5] input_cat_dir", "object path :return: ''' cat, obj_file = obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file,", "file recursively :param input_path: :param output_path: :return: ''' input_path = os.path.abspath(input_path) output_path =", "not file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue else: # write obj file", "recursive_normalize(input_path, output_path): ''' Normalize *.obj file recursively :param input_path: :param output_path: :return: '''", "import write_json, load_data_path from settings import cpu_cores def recursive_normalize(input_path, output_path): ''' Normalize *.obj", "os.makedirs(os.path.dirname(output_file_path)) if not file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue else: # write", "if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue else: # write obj file size_centroid_file =", "obj files to [-0.5, 0.5]^3. 
author: ynie date: Jan, 2020 ''' import sys", "total_size, centroid = normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file,", "os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue else: #", "size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json' if os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue total_size, centroid =", "obj_file = obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat),", "if not file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue else: # write obj", "input_path = os.path.abspath(input_path) output_path = os.path.abspath(output_path) for root, _, files in os.walk(input_path, topdown=True):", "from settings import cpu_cores def recursive_normalize(input_path, output_path): ''' Normalize *.obj file recursively :param", "output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir) if __name__ == '__main__': if", "+ '_size_centroid.json' if os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue total_size, centroid = normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding)", "'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir) if __name__ == '__main__':", "os.symlink(input_file_path, output_file_path) continue else: # write obj file size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json'", "from multiprocessing import Pool from tools.utils import append_dir, normalize_obj_file from tools.read_and_write import write_json,", "root, _, files in os.walk(input_path, topdown=True): for file in files: input_file_path = os.path.join(root,", "files in os.walk(input_path, topdown=True): for file in files: input_file_path = os.path.join(root, file) output_file_path", "shapenet_normalized_path import os from multiprocessing import Pool from tools.utils import append_dir, normalize_obj_file from", "normalize(obj_path): ''' normalize shapes :param obj_path: ShapeNet object path :return: ''' cat, obj_file", "# write obj file size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json' if os.path.exists(output_file_path) and os.path.exists(size_centroid_file):", "cat, obj_file = obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path,", "topdown=True): for file in files: input_file_path = os.path.join(root, file) output_file_path = input_file_path.replace(input_path, output_path)", "== '__main__': if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects = load_data_path(shapenet_path) p = Pool(processes=cpu_cores) p.map(normalize,", "output_cat_dir) if __name__ == '__main__': if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects = 
load_data_path(shapenet_path) p", "Normalize *.obj file recursively :param input_path: :param output_path: :return: ''' input_path = os.path.abspath(input_path)", "'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid) def normalize(obj_path): ''' normalize shapes :param obj_path: ShapeNet object", "def recursive_normalize(input_path, output_path): ''' Normalize *.obj file recursively :param input_path: :param output_path: :return:", "obj_file, 'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir) if __name__ ==", "''' Normalize shapenet obj files to [-0.5, 0.5]^3. author: ynie date: Jan, 2020", "sys.path.append('.') from data_config import shape_scale_padding, \\ shapenet_path, shapenet_normalized_path import os from multiprocessing import", "multiprocessing import Pool from tools.utils import append_dir, normalize_obj_file from tools.read_and_write import write_json, load_data_path", "Pool from tools.utils import append_dir, normalize_obj_file from tools.read_and_write import write_json, load_data_path from settings", "'__main__': if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects = load_data_path(shapenet_path) p = Pool(processes=cpu_cores) p.map(normalize, all_objects)", "in os.walk(input_path, topdown=True): for file in files: input_file_path = os.path.join(root, file) output_file_path =", "append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir) if __name__ == '__main__': if not os.path.exists(shapenet_normalized_path):", "cpu_cores def recursive_normalize(input_path, output_path): ''' Normalize *.obj file recursively :param input_path: :param output_path:", "os from multiprocessing import Pool from tools.utils import append_dir, normalize_obj_file from tools.read_and_write import", "\\ shapenet_path, shapenet_normalized_path import os from multiprocessing import Pool from tools.utils import append_dir,", ":return: ''' cat, obj_file = obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i') output_cat_dir", "centroid = normalize_obj_file(input_file_path, output_file_path, padding=shape_scale_padding) size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid)", "[-0.5, 0.5]^3. 
author: ynie date: Jan, 2020 ''' import sys sys.path.append('.') from data_config", "import cpu_cores def recursive_normalize(input_path, output_path): ''' Normalize *.obj file recursively :param input_path: :param", "file) output_file_path = input_file_path.replace(input_path, output_path) if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not file.endswith('.obj'): if", "not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects = load_data_path(shapenet_path) p = Pool(processes=cpu_cores) p.map(normalize, all_objects) p.close() p.join()", "def normalize(obj_path): ''' normalize shapes :param obj_path: ShapeNet object path :return: ''' cat,", ":param output_path: :return: ''' input_path = os.path.abspath(input_path) output_path = os.path.abspath(output_path) for root, _,", "*.obj file recursively :param input_path: :param output_path: :return: ''' input_path = os.path.abspath(input_path) output_path", "__name__ == '__main__': if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects = load_data_path(shapenet_path) p = Pool(processes=cpu_cores)", "if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue", "os.remove(output_file_path) os.symlink(input_file_path, output_file_path) continue else: # write obj file size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) +", "author: ynie date: Jan, 2020 ''' import sys sys.path.append('.') from data_config import shape_scale_padding,", "settings import cpu_cores def recursive_normalize(input_path, output_path): ''' Normalize *.obj file recursively :param input_path:", "output_path: :return: ''' input_path = os.path.abspath(input_path) output_path = os.path.abspath(output_path) for root, _, files", "total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid) def normalize(obj_path): ''' normalize shapes :param obj_path: ShapeNet", "obj file size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json' if os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue total_size,", "obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o')", "output_file_path) continue else: # write obj file size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json' if", "cat), obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir) if __name__ == '__main__': if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path)", "from tools.read_and_write import write_json, load_data_path from settings import cpu_cores def recursive_normalize(input_path, output_path): '''", "= append_dir(os.path.join(shapenet_path, cat), obj_file, 'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir)", "path :return: ''' cat, obj_file = obj_path.split('/')[3:5] input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i')", "input_file_path.replace(input_path, output_path) if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not 
file.endswith('.obj'): if os.path.exists(output_file_path): os.remove(output_file_path) os.symlink(input_file_path,", "obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir) if __name__ == '__main__': if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects", "files to [-0.5, 0.5]^3. author: ynie date: Jan, 2020 ''' import sys sys.path.append('.')", "''' normalize shapes :param obj_path: ShapeNet object path :return: ''' cat, obj_file =", "''' Normalize *.obj file recursively :param input_path: :param output_path: :return: ''' input_path =", "Jan, 2020 ''' import sys sys.path.append('.') from data_config import shape_scale_padding, \\ shapenet_path, shapenet_normalized_path", "'.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json' if os.path.exists(output_file_path) and os.path.exists(size_centroid_file): continue total_size, centroid = normalize_obj_file(input_file_path, output_file_path,", "_, files in os.walk(input_path, topdown=True): for file in files: input_file_path = os.path.join(root, file)", "else: # write obj file size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json' if os.path.exists(output_file_path) and", "recursive_normalize(input_cat_dir, output_cat_dir) if __name__ == '__main__': if not os.path.exists(shapenet_normalized_path): os.mkdir(shapenet_normalized_path) all_objects = load_data_path(shapenet_path)", "from tools.utils import append_dir, normalize_obj_file from tools.read_and_write import write_json, load_data_path from settings import", "cat), obj_file, 'i') output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o') recursive_normalize(input_cat_dir, output_cat_dir) if __name__", "= os.path.join(root, file) output_file_path = input_file_path.replace(input_path, output_path) if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not", "input_file_path = os.path.join(root, file) output_file_path = input_file_path.replace(input_path, output_path) if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if", "centroid.tolist()} write_json(size_centroid_file, size_centroid) def normalize(obj_path): ''' normalize shapes :param obj_path: ShapeNet object path", "{'size': total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid) def normalize(obj_path): ''' normalize shapes :param obj_path:", "padding=shape_scale_padding) size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid) def normalize(obj_path): ''' normalize", "os.walk(input_path, topdown=True): for file in files: input_file_path = os.path.join(root, file) output_file_path = input_file_path.replace(input_path,", "to [-0.5, 0.5]^3. 
author: ynie date: Jan, 2020 ''' import sys sys.path.append('.') from", "load_data_path from settings import cpu_cores def recursive_normalize(input_path, output_path): ''' Normalize *.obj file recursively", "''' import sys sys.path.append('.') from data_config import shape_scale_padding, \\ shapenet_path, shapenet_normalized_path import os", "size_centroid) def normalize(obj_path): ''' normalize shapes :param obj_path: ShapeNet object path :return: '''", "output_file_path = input_file_path.replace(input_path, output_path) if not os.path.exists(os.path.dirname(output_file_path)): os.makedirs(os.path.dirname(output_file_path)) if not file.endswith('.obj'): if os.path.exists(output_file_path):", "for root, _, files in os.walk(input_path, topdown=True): for file in files: input_file_path =", "file in files: input_file_path = os.path.join(root, file) output_file_path = input_file_path.replace(input_path, output_path) if not", "= {'size': total_size.tolist(), 'centroid': centroid.tolist()} write_json(size_centroid_file, size_centroid) def normalize(obj_path): ''' normalize shapes :param", "shape_scale_padding, \\ shapenet_path, shapenet_normalized_path import os from multiprocessing import Pool from tools.utils import" ]
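
# `normalize_obj_file` is imported from tools.utils and its body is not part
# of this file. The sketch below is a hypothetical stand-in (name and padding
# semantics are assumptions) illustrating the intended transform: shift the
# vertices to the bounding-box centroid and scale uniformly by the longest
# bounding-box side plus padding, so the shape fits inside [-0.5, 0.5]^3.
import numpy as np

def normalize_obj_file_sketch(input_path, output_path, padding=0.0):
    with open(input_path) as f:
        lines = f.readlines()
    # parse only vertex lines; everything else is copied through unchanged
    verts = np.array([[float(v) for v in l.split()[1:4]]
                      for l in lines if l.startswith('v ')])
    centroid = (verts.max(0) + verts.min(0)) / 2.0
    total_size = verts.max(0) - verts.min(0)      # per-axis bounding-box size
    scale = total_size.max() * (1.0 + padding)    # uniform scale with padding
    with open(output_path, 'w') as f:
        for l in lines:
            if l.startswith('v '):
                x, y, z = (np.array([float(v) for v in l.split()[1:4]]) - centroid) / scale
                f.write('v %f %f %f\n' % (x, y, z))
            else:
                f.write(l)
    return total_size, centroid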
[ "import admin from django.views.generic import TemplateView from rsvps.views import GuestRsvpView urlpatterns = [", "admin from django.views.generic import TemplateView from rsvps.views import GuestRsvpView urlpatterns = [ url(r'^$',", "GuestRsvpView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"), name='event'),", "TemplateView from rsvps.views import GuestRsvpView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"),", "rsvps.views import GuestRsvpView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$',", "import include, url from django.contrib import admin from django.views.generic import TemplateView from rsvps.views", "= [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"), name='event'), url(r'^registry/$', TemplateView.as_view(template_name=\"registry.html\"),", "from django.conf.urls import include, url from django.contrib import admin from django.views.generic import TemplateView", "urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"), name='event'), url(r'^registry/$',", "[ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"), name='event'), url(r'^registry/$', TemplateView.as_view(template_name=\"registry.html\"), name='registry'),", "TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"), name='event'), url(r'^registry/$', TemplateView.as_view(template_name=\"registry.html\"), name='registry'), url(r'^rsvp/(?P<pk>[0-9]+)/$', GuestRsvpView.as_view()),", "url from django.contrib import admin from django.views.generic import TemplateView from rsvps.views import GuestRsvpView", "TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"), name='event'), url(r'^registry/$', TemplateView.as_view(template_name=\"registry.html\"), name='registry'), url(r'^rsvp/(?P<pk>[0-9]+)/$', GuestRsvpView.as_view()), url(r'^admin/', include(admin.site.urls)), ]", "include, url from django.contrib import admin from django.views.generic import TemplateView from rsvps.views import", "import GuestRsvpView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"),", "url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"), 
name='event'), url(r'^registry/$', TemplateView.as_view(template_name=\"registry.html\"), name='registry'), url(r'^rsvp/(?P<pk>[0-9]+)/$', GuestRsvpView.as_view()), url(r'^admin/', include(admin.site.urls)),", "django.conf.urls import include, url from django.contrib import admin from django.views.generic import TemplateView from", "import TemplateView from rsvps.views import GuestRsvpView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$',", "django.views.generic import TemplateView from rsvps.views import GuestRsvpView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'),", "from django.views.generic import TemplateView from rsvps.views import GuestRsvpView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"),", "url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"), name='event'), url(r'^registry/$', TemplateView.as_view(template_name=\"registry.html\"), name='registry'), url(r'^rsvp/(?P<pk>[0-9]+)/$',", "from rsvps.views import GuestRsvpView urlpatterns = [ url(r'^$', TemplateView.as_view(template_name=\"home.html\"), name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'),", "name='home'), url(r'^about/$', TemplateView.as_view(template_name=\"about.html\"), name='about'), url(r'^event/$', TemplateView.as_view(template_name=\"event.html\"), name='event'), url(r'^registry/$', TemplateView.as_view(template_name=\"registry.html\"), name='registry'), url(r'^rsvp/(?P<pk>[0-9]+)/$', GuestRsvpView.as_view()), url(r'^admin/',", "django.contrib import admin from django.views.generic import TemplateView from rsvps.views import GuestRsvpView urlpatterns =", "from django.contrib import admin from django.views.generic import TemplateView from rsvps.views import GuestRsvpView urlpatterns" ]
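
# Note: the urlpatterns above use the pre-Django-2.0 `django.conf.urls.url`
# API. On Django 2.0+ the same routes would be spelled with `path`/`re_path`
# (same views assumed), e.g.:
#
#   from django.urls import path, re_path
#
#   urlpatterns = [
#       path('', TemplateView.as_view(template_name="home.html"), name='home'),
#       path('rsvp/<int:pk>/', GuestRsvpView.as_view()),
#       path('admin/', admin.site.urls),
#   ]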
[ "random def main(): filename = 'test' n_hidden = 64 mutual_infos = [] for", "filename = 'test' n_hidden = 64 mutual_infos = [] for i in range(n_hidden):", "for i in range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i] with open(f'mutual_info_{filename}.toml', 'w') as f: toml_str", "[] for i in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict = {} for i in range(n_hidden):", "in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict = {} for i in range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i]", "i in range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i] with open(f'mutual_info_{filename}.toml', 'w') as f: toml_str =", "'test' n_hidden = 64 mutual_infos = [] for i in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict", "n_hidden = 64 mutual_infos = [] for i in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict =", "main(): filename = 'test' n_hidden = 64 mutual_infos = [] for i in", "range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict = {} for i in range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i] with", "mutual_infos.append(random.random()) mutual_info_dict = {} for i in range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i] with open(f'mutual_info_{filename}.toml',", "import toml import random def main(): filename = 'test' n_hidden = 64 mutual_infos", "mutual_infos = [] for i in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict = {} for i", "for i in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict = {} for i in range(n_hidden): mutual_info_dict[f'{i:04}']", "= {} for i in range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i] with open(f'mutual_info_{filename}.toml', 'w') as", "= 'test' n_hidden = 64 mutual_infos = [] for i in range(n_hidden): mutual_infos.append(random.random())", "mutual_info_dict[f'{i:04}'] = mutual_infos[i] with open(f'mutual_info_{filename}.toml', 'w') as f: toml_str = toml.dump(mutual_info_dict, f) print(toml_str)", "= mutual_infos[i] with open(f'mutual_info_{filename}.toml', 'w') as f: toml_str = toml.dump(mutual_info_dict, f) print(toml_str) main()", "in range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i] with open(f'mutual_info_{filename}.toml', 'w') as f: toml_str = toml.dump(mutual_info_dict,", "i in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict = {} for i in range(n_hidden): mutual_info_dict[f'{i:04}'] =", "{} for i in range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i] with open(f'mutual_info_{filename}.toml', 'w') as f:", "def main(): filename = 'test' n_hidden = 64 mutual_infos = [] for i", "64 mutual_infos = [] for i in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict = {} for", "range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i] with open(f'mutual_info_{filename}.toml', 'w') as f: toml_str = toml.dump(mutual_info_dict, f)", "toml import random def main(): filename = 'test' n_hidden = 64 mutual_infos =", "mutual_info_dict = {} for i in range(n_hidden): mutual_info_dict[f'{i:04}'] = mutual_infos[i] with open(f'mutual_info_{filename}.toml', 'w')", "import random def main(): filename = 'test' n_hidden = 64 mutual_infos = []", "= [] for i in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict = {} for i in", "= 64 mutual_infos = [] for i in range(n_hidden): mutual_infos.append(random.random()) mutual_info_dict = {}" ]
[ "@property def pressure(self) -> float: # hPa return 1013.25 * self.value @property def", "100.0 * self.value @property def pressure(self) -> float: # hPa return 1013.25 *", "float: # hPa return 1013.25 * self.value @property def temperature(self) -> float: #", "import Base class BME280(Base): @property def humidity(self) -> float: # % return 100.0", "Base class BME280(Base): @property def humidity(self) -> float: # % return 100.0 *", "def pressure(self) -> float: # hPa return 1013.25 * self.value @property def temperature(self)", "% return 100.0 * self.value @property def pressure(self) -> float: # hPa return", "from mocks.base import Base class BME280(Base): @property def humidity(self) -> float: # %", "return 100.0 * self.value @property def pressure(self) -> float: # hPa return 1013.25", "-> float: # % return 100.0 * self.value @property def pressure(self) -> float:", "# hPa return 1013.25 * self.value @property def temperature(self) -> float: # ºC", "class BME280(Base): @property def humidity(self) -> float: # % return 100.0 * self.value", "* self.value @property def temperature(self) -> float: # ºC return 100.0 * self.value", "hPa return 1013.25 * self.value @property def temperature(self) -> float: # ºC return", "1013.25 * self.value @property def temperature(self) -> float: # ºC return 100.0 *", "@property def humidity(self) -> float: # % return 100.0 * self.value @property def", "humidity(self) -> float: # % return 100.0 * self.value @property def pressure(self) ->", "# % return 100.0 * self.value @property def pressure(self) -> float: # hPa", "BME280(Base): @property def humidity(self) -> float: # % return 100.0 * self.value @property", "return 1013.25 * self.value @property def temperature(self) -> float: # ºC return 100.0", "pressure(self) -> float: # hPa return 1013.25 * self.value @property def temperature(self) ->", "float: # % return 100.0 * self.value @property def pressure(self) -> float: #", "def humidity(self) -> float: # % return 100.0 * self.value @property def pressure(self)", "-> float: # hPa return 1013.25 * self.value @property def temperature(self) -> float:", "mocks.base import Base class BME280(Base): @property def humidity(self) -> float: # % return", "* self.value @property def pressure(self) -> float: # hPa return 1013.25 * self.value", "self.value @property def pressure(self) -> float: # hPa return 1013.25 * self.value @property" ]
[ "letra O ou X def escolhaLetraJogador(): l = \"\" while l != \"O\"", "range(9): t.append(' ') return t # Mostrar o tabuleiro def mostraTabuleiro(posi): print(\" |", "True else: return False def criaTabuleiro(): t = [] t.append('') for i in", "\") print(' {} | {} | {} '.format(posi[7],posi[8],posi[9])) print(\" | | \") print(\"-----------\")", "| {} | {} '.format(posi[1], posi[2], posi[3])) print(\" | | \") letras =", "cantos # 4) Jogar no centro # 5) Jogar nos lados # Verifica", "return False def criaTabuleiro(): t = [] t.append('') for i in range(9): t.append('", "na próxima jogada # 3) Jogar nos cantos # 4) Jogar no centro", "5) Jogar nos lados # Verifica se houve vencedor # Verifica se houve", "while l != \"O\" and l != \"X\": l = str(input('Escolha a letra", "= \"\" while l != \"O\" and l != \"X\": l = str(input('Escolha", "print(\" | | \") print(' {} | {} | {} '.format(posi[4], posi[5], posi[6]))", "# 1) Executar movimento para vencer # 2) Executar movimento para bloquaer o", "\") letras = escolhaLetraJogador() vezJogador = iniciaJogador() #tabuleiro = [' ','X',' ','O',' ','X','O','", "jogador # Vez do Computador # Definir movimento do computador # 1) Executar", "else: return False def criaTabuleiro(): t = [] t.append('') for i in range(9):", "print(' {} | {} | {} '.format(posi[1], posi[2], posi[3])) print(\" | | \")", "tabuleiro # Receber o movimento do jogador # Vez do Computador # Definir", "o movimento do jogador # Vez do Computador # Definir movimento do computador", "escolhaLetraJogador(): l = \"\" while l != \"O\" and l != \"X\": l", "random.randint(1,2) == 1: return True else: return False def criaTabuleiro(): t = []", "# 5) Jogar nos lados # Verifica se houve vencedor # Verifica se", "ou X): ')).upper() if l == \"O\": letras = ['O', \"X\"] else: letras", "Mostrar o tabuleiro def mostraTabuleiro(posi): print(\" | | \") print(' {} | {}", "| {} | {} '.format(posi[7],posi[8],posi[9])) print(\" | | \") print(\"-----------\") print(\" | |", "houve vencedor # Verifica se houve empate # Pergunta se o Jogador deseja", "1) Executar movimento para vencer # 2) Executar movimento para bloquaer o jogador", "| | \") print(' {} | {} | {} '.format(posi[4], posi[5], posi[6])) print(\"", "letras = ['O', \"X\"] else: letras = ['X', \"O\"] return letras # Sortear", "def criaTabuleiro(): t = [] t.append('') for i in range(9): t.append(' ') return", "{} | {} '.format(posi[7],posi[8],posi[9])) print(\" | | \") print(\"-----------\") print(\" | | \")", "\"O\"] return letras # Sortear quem começa primeiro def iniciaJogador(): if random.randint(1,2) ==", "o jogador de vencer na próxima jogada # 3) Jogar nos cantos #", "jogar (O ou X): ')).upper() if l == \"O\": letras = ['O', \"X\"]", "','O','X'] tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez do Jogador # Mostrar o tabuleiro", "print(\"-----------\") print(\" | | \") print(' {} | {} | {} '.format(posi[4], posi[5],", "if l == \"O\": letras = ['O', \"X\"] else: letras = ['X', \"O\"]", "and l != \"X\": l = str(input('Escolha a letra que prefere jogar (O", "Verifica se houve vencedor # Verifica se houve empate # Pergunta se o", "Executar movimento para bloquaer o jogador de vencer na próxima jogada # 3)", "print(\"-----------\") print(\" | | \") print(' {} | {} | {} '.format(posi[1], posi[2],", "| \") print(' {} | {} | {} '.format(posi[4], posi[5], posi[6])) print(\" |", "else: letras = ['X', \"O\"] return letras # Sortear quem começa primeiro def", "{} '.format(posi[1], posi[2], posi[3])) print(\" | | \") letras = escolhaLetraJogador() 
vezJogador =", "return letras # Sortear quem começa primeiro def iniciaJogador(): if random.randint(1,2) == 1:", "posi[5], posi[6])) print(\" | | \") print(\"-----------\") print(\" | | \") print(' {}", "| \") print(' {} | {} | {} '.format(posi[7],posi[8],posi[9])) print(\" | | \")", "= [' ','X',' ','O',' ','X','O',' ','O','X'] tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez do", "3) Jogar nos cantos # 4) Jogar no centro # 5) Jogar nos", "# Mostrar o tabuleiro # Receber o movimento do jogador # Vez do", "letras = ['X', \"O\"] return letras # Sortear quem começa primeiro def iniciaJogador():", "[] t.append('') for i in range(9): t.append(' ') return t # Mostrar o", "do Jogador # Mostrar o tabuleiro # Receber o movimento do jogador #", "| {} '.format(posi[1], posi[2], posi[3])) print(\" | | \") letras = escolhaLetraJogador() vezJogador", "iniciaJogador() #tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X'] tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro) #", "\") print(' {} | {} | {} '.format(posi[4], posi[5], posi[6])) print(\" | |", "lados # Verifica se houve vencedor # Verifica se houve empate # Pergunta", "prefere jogar (O ou X): ')).upper() if l == \"O\": letras = ['O',", "Vez do Computador # Definir movimento do computador # 1) Executar movimento para", "\"X\": l = str(input('Escolha a letra que prefere jogar (O ou X): ')).upper()", "= [] t.append('') for i in range(9): t.append(' ') return t # Mostrar", "t.append(' ') return t # Mostrar o tabuleiro def mostraTabuleiro(posi): print(\" | |", "ou X def escolhaLetraJogador(): l = \"\" while l != \"O\" and l", "print(\" | | \") print(\"-----------\") print(\" | | \") print(' {} | {}", "tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez do Jogador # Mostrar o tabuleiro #", "# Vez do Jogador # Mostrar o tabuleiro # Receber o movimento do", "Jogar nos cantos # 4) Jogar no centro # 5) Jogar nos lados", "') return t # Mostrar o tabuleiro def mostraTabuleiro(posi): print(\" | | \")", "| | \") letras = escolhaLetraJogador() vezJogador = iniciaJogador() #tabuleiro = [' ','X','", "['O', \"X\"] else: letras = ['X', \"O\"] return letras # Sortear quem começa", "# Mostrar o tabuleiro def mostraTabuleiro(posi): print(\" | | \") print(' {} |", "import random # Pedir ao Jogador para escolher uma letra O ou X", "!= \"X\": l = str(input('Escolha a letra que prefere jogar (O ou X):", "letra que prefere jogar (O ou X): ')).upper() if l == \"O\": letras", "= ['X', \"O\"] return letras # Sortear quem começa primeiro def iniciaJogador(): if", "movimento para vencer # 2) Executar movimento para bloquaer o jogador de vencer", "para bloquaer o jogador de vencer na próxima jogada # 3) Jogar nos", "== 1: return True else: return False def criaTabuleiro(): t = [] t.append('')", "posi[3])) print(\" | | \") letras = escolhaLetraJogador() vezJogador = iniciaJogador() #tabuleiro =", "movimento do computador # 1) Executar movimento para vencer # 2) Executar movimento", "i in range(9): t.append(' ') return t # Mostrar o tabuleiro def mostraTabuleiro(posi):", "Pedir ao Jogador para escolher uma letra O ou X def escolhaLetraJogador(): l", "')).upper() if l == \"O\": letras = ['O', \"X\"] else: letras = ['X',", "\"O\" and l != \"X\": l = str(input('Escolha a letra que prefere jogar", "'.format(posi[4], posi[5], posi[6])) print(\" | | \") print(\"-----------\") print(\" | | \") print('", "4) Jogar no centro # 5) Jogar nos lados # Verifica se houve", "def escolhaLetraJogador(): l = \"\" while l != \"O\" and l != \"X\":", "= iniciaJogador() 
#tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X'] tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro)", "Definir movimento do computador # 1) Executar movimento para vencer # 2) Executar", "['X', \"O\"] return letras # Sortear quem começa primeiro def iniciaJogador(): if random.randint(1,2)", "# Vez do Computador # Definir movimento do computador # 1) Executar movimento", "próxima jogada # 3) Jogar nos cantos # 4) Jogar no centro #", "# Pedir ao Jogador para escolher uma letra O ou X def escolhaLetraJogador():", "return t # Mostrar o tabuleiro def mostraTabuleiro(posi): print(\" | | \") print('", "print(' {} | {} | {} '.format(posi[4], posi[5], posi[6])) print(\" | | \")", "Vez do Jogador # Mostrar o tabuleiro # Receber o movimento do jogador", "| {} '.format(posi[4], posi[5], posi[6])) print(\" | | \") print(\"-----------\") print(\" | |", "letras = escolhaLetraJogador() vezJogador = iniciaJogador() #tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X']", "Jogador # Mostrar o tabuleiro # Receber o movimento do jogador # Vez", "no centro # 5) Jogar nos lados # Verifica se houve vencedor #", "nos lados # Verifica se houve vencedor # Verifica se houve empate #", "| | \") print(' {} | {} | {} '.format(posi[1], posi[2], posi[3])) print(\"", "centro # 5) Jogar nos lados # Verifica se houve vencedor # Verifica", "1: return True else: return False def criaTabuleiro(): t = [] t.append('') for", "(O ou X): ')).upper() if l == \"O\": letras = ['O', \"X\"] else:", "l = str(input('Escolha a letra que prefere jogar (O ou X): ')).upper() if", "= criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez do Jogador # Mostrar o tabuleiro # Receber", "# 3) Jogar nos cantos # 4) Jogar no centro # 5) Jogar", "\"X\"] else: letras = ['X', \"O\"] return letras # Sortear quem começa primeiro", "Jogador para escolher uma letra O ou X def escolhaLetraJogador(): l = \"\"", "= str(input('Escolha a letra que prefere jogar (O ou X): ')).upper() if l", "# Sortear quem começa primeiro def iniciaJogador(): if random.randint(1,2) == 1: return True", "| \") print(' {} | {} | {} '.format(posi[1], posi[2], posi[3])) print(\" |", "# Verifica se houve empate # Pergunta se o Jogador deseja jogar novamente", "str(input('Escolha a letra que prefere jogar (O ou X): ')).upper() if l ==", "False def criaTabuleiro(): t = [] t.append('') for i in range(9): t.append(' ')", "X): ')).upper() if l == \"O\": letras = ['O', \"X\"] else: letras =", "jogada # 3) Jogar nos cantos # 4) Jogar no centro # 5)", "if random.randint(1,2) == 1: return True else: return False def criaTabuleiro(): t =", "{} | {} | {} '.format(posi[7],posi[8],posi[9])) print(\" | | \") print(\"-----------\") print(\" |", "vencer # 2) Executar movimento para bloquaer o jogador de vencer na próxima", "'.format(posi[1], posi[2], posi[3])) print(\" | | \") letras = escolhaLetraJogador() vezJogador = iniciaJogador()", "[' ','X',' ','O',' ','X','O',' ','O','X'] tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez do Jogador", "l == \"O\": letras = ['O', \"X\"] else: letras = ['X', \"O\"] return", "\") print(\"-----------\") print(\" | | \") print(' {} | {} | {} '.format(posi[1],", "| \") letras = escolhaLetraJogador() vezJogador = iniciaJogador() #tabuleiro = [' ','X',' ','O','", "Sortear quem começa primeiro def iniciaJogador(): if random.randint(1,2) == 1: return True else:", "ao Jogador para escolher uma letra O ou X def escolhaLetraJogador(): l =", "{} '.format(posi[7],posi[8],posi[9])) print(\" | | \") print(\"-----------\") print(\" | | \") print(' {}", "do 
Computador # Definir movimento do computador # 1) Executar movimento para vencer", "def iniciaJogador(): if random.randint(1,2) == 1: return True else: return False def criaTabuleiro():", "| \") print(\"-----------\") print(\" | | \") print(' {} | {} | {}", "bloquaer o jogador de vencer na próxima jogada # 3) Jogar nos cantos", "nos cantos # 4) Jogar no centro # 5) Jogar nos lados #", "= ['O', \"X\"] else: letras = ['X', \"O\"] return letras # Sortear quem", "| | \") print(\"-----------\") print(\" | | \") print(' {} | {} |", "print(\" | | \") letras = escolhaLetraJogador() vezJogador = iniciaJogador() #tabuleiro = ['", "# Verifica se houve vencedor # Verifica se houve empate # Pergunta se", "t = [] t.append('') for i in range(9): t.append(' ') return t #", "escolher uma letra O ou X def escolhaLetraJogador(): l = \"\" while l", "Receber o movimento do jogador # Vez do Computador # Definir movimento do", "para vencer # 2) Executar movimento para bloquaer o jogador de vencer na", "\") print(\"-----------\") print(\" | | \") print(' {} | {} | {} '.format(posi[4],", "uma letra O ou X def escolhaLetraJogador(): l = \"\" while l !=", "print(' {} | {} | {} '.format(posi[7],posi[8],posi[9])) print(\" | | \") print(\"-----------\") print(\"", "== \"O\": letras = ['O', \"X\"] else: letras = ['X', \"O\"] return letras", "se houve vencedor # Verifica se houve empate # Pergunta se o Jogador", "posi[6])) print(\" | | \") print(\"-----------\") print(\" | | \") print(' {} |", "Executar movimento para vencer # 2) Executar movimento para bloquaer o jogador de", "= escolhaLetraJogador() vezJogador = iniciaJogador() #tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X'] tabuleiro", "!= \"O\" and l != \"X\": l = str(input('Escolha a letra que prefere", "{} '.format(posi[4], posi[5], posi[6])) print(\" | | \") print(\"-----------\") print(\" | | \")", "jogador de vencer na próxima jogada # 3) Jogar nos cantos # 4)", "iniciaJogador(): if random.randint(1,2) == 1: return True else: return False def criaTabuleiro(): t", "mostraTabuleiro(tabuleiro) # Vez do Jogador # Mostrar o tabuleiro # Receber o movimento", "\"\" while l != \"O\" and l != \"X\": l = str(input('Escolha a", "l != \"O\" and l != \"X\": l = str(input('Escolha a letra que", "\") print(' {} | {} | {} '.format(posi[1], posi[2], posi[3])) print(\" | |", "{} | {} | {} '.format(posi[1], posi[2], posi[3])) print(\" | | \") letras", "'.format(posi[7],posi[8],posi[9])) print(\" | | \") print(\"-----------\") print(\" | | \") print(' {} |", "{} | {} | {} '.format(posi[4], posi[5], posi[6])) print(\" | | \") print(\"-----------\")", "{} | {} '.format(posi[4], posi[5], posi[6])) print(\" | | \") print(\"-----------\") print(\" |", "o tabuleiro def mostraTabuleiro(posi): print(\" | | \") print(' {} | {} |", "vezJogador = iniciaJogador() #tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X'] tabuleiro = criaTabuleiro()", "quem começa primeiro def iniciaJogador(): if random.randint(1,2) == 1: return True else: return", "o tabuleiro # Receber o movimento do jogador # Vez do Computador #", "Computador # Definir movimento do computador # 1) Executar movimento para vencer #", "movimento para bloquaer o jogador de vencer na próxima jogada # 3) Jogar", "random # Pedir ao Jogador para escolher uma letra O ou X def", "posi[2], posi[3])) print(\" | | \") letras = escolhaLetraJogador() vezJogador = iniciaJogador() #tabuleiro", "# 2) Executar movimento para bloquaer o jogador de vencer na próxima jogada", "que prefere jogar (O ou X): ')).upper() if l == \"O\": letras =", 
"computador # 1) Executar movimento para vencer # 2) Executar movimento para bloquaer", "vencer na próxima jogada # 3) Jogar nos cantos # 4) Jogar no", "t.append('') for i in range(9): t.append(' ') return t # Mostrar o tabuleiro", "para escolher uma letra O ou X def escolhaLetraJogador(): l = \"\" while", "criaTabuleiro(): t = [] t.append('') for i in range(9): t.append(' ') return t", "| {} | {} '.format(posi[4], posi[5], posi[6])) print(\" | | \") print(\"-----------\") print(\"", "de vencer na próxima jogada # 3) Jogar nos cantos # 4) Jogar", "tabuleiro def mostraTabuleiro(posi): print(\" | | \") print(' {} | {} | {}", "\"O\": letras = ['O', \"X\"] else: letras = ['X', \"O\"] return letras #", "do jogador # Vez do Computador # Definir movimento do computador # 1)", "# Definir movimento do computador # 1) Executar movimento para vencer # 2)", "def mostraTabuleiro(posi): print(\" | | \") print(' {} | {} | {} '.format(posi[7],posi[8],posi[9]))", "return True else: return False def criaTabuleiro(): t = [] t.append('') for i", "for i in range(9): t.append(' ') return t # Mostrar o tabuleiro def", "{} | {} '.format(posi[1], posi[2], posi[3])) print(\" | | \") letras = escolhaLetraJogador()", "','O',' ','X','O',' ','O','X'] tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez do Jogador # Mostrar", "print(\" | | \") print(' {} | {} | {} '.format(posi[1], posi[2], posi[3]))", "X def escolhaLetraJogador(): l = \"\" while l != \"O\" and l !=", "','X','O',' ','O','X'] tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez do Jogador # Mostrar o", "a letra que prefere jogar (O ou X): ')).upper() if l == \"O\":", "in range(9): t.append(' ') return t # Mostrar o tabuleiro def mostraTabuleiro(posi): print(\"", "começa primeiro def iniciaJogador(): if random.randint(1,2) == 1: return True else: return False", "letras # Sortear quem começa primeiro def iniciaJogador(): if random.randint(1,2) == 1: return", "primeiro def iniciaJogador(): if random.randint(1,2) == 1: return True else: return False def", "l = \"\" while l != \"O\" and l != \"X\": l =", "print(\" | | \") print(' {} | {} | {} '.format(posi[7],posi[8],posi[9])) print(\" |", "Jogar no centro # 5) Jogar nos lados # Verifica se houve vencedor", "t # Mostrar o tabuleiro def mostraTabuleiro(posi): print(\" | | \") print(' {}", "| {} '.format(posi[7],posi[8],posi[9])) print(\" | | \") print(\"-----------\") print(\" | | \") print('", "| | \") print(' {} | {} | {} '.format(posi[7],posi[8],posi[9])) print(\" | |", "2) Executar movimento para bloquaer o jogador de vencer na próxima jogada #", "mostraTabuleiro(posi): print(\" | | \") print(' {} | {} | {} '.format(posi[7],posi[8],posi[9])) print(\"", "movimento do jogador # Vez do Computador # Definir movimento do computador #", "do computador # 1) Executar movimento para vencer # 2) Executar movimento para", "Mostrar o tabuleiro # Receber o movimento do jogador # Vez do Computador", "Jogar nos lados # Verifica se houve vencedor # Verifica se houve empate", "# 4) Jogar no centro # 5) Jogar nos lados # Verifica se", "l != \"X\": l = str(input('Escolha a letra que prefere jogar (O ou", "escolhaLetraJogador() vezJogador = iniciaJogador() #tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X'] tabuleiro =", "criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez do Jogador # Mostrar o tabuleiro # Receber o", "vencedor # Verifica se houve empate # Pergunta se o Jogador deseja jogar", "#tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X'] tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez", 
"O ou X def escolhaLetraJogador(): l = \"\" while l != \"O\" and", "# Receber o movimento do jogador # Vez do Computador # Definir movimento", "','X',' ','O',' ','X','O',' ','O','X'] tabuleiro = criaTabuleiro() mostraTabuleiro(tabuleiro) # Vez do Jogador #" ]
[ "self.conv1 = SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(128) self.conv2 =", "padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False) def", "= nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 512 def", "Block 3. ''' def __init__(self, in_channels): super(Block3, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 256, (1,", "self.r_bn1(rx) # Main way x = self.conv1(x) x = self.bn1(x) x = torch.relu(x)", "self.out_channels = 512 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx =", "nn.BatchNorm2d(256) self.out_channels = 256 def forward(self, x): # Shortcut rx = x #", "Block5(self.block4_lst[0].out_channels) else: self.block5 = Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1) self.final =", "return output class Block1(nn.Module): ''' Definition of Block 1. ''' def __init__(self, in_channels):", "with Block[1-6] utilized. ''' def __init__(self, in_channels, num_classes, num_middle_layers=4): super(Network, self).__init__() self.block1 =", "nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels,", "= self.mp3(x) # Confluence x = x + rx return x class Block6(nn.Module):", "nn.Conv2d(32, 64, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64) self.out_channels = 64 def", "self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 256 def forward(self, x):", "kernel_size=1, stride=1, bias=False) def forward(self, x): x = self.depthwise_conv(x) output = self.pointwise_conv(x) return", "return x class Block4(nn.Module): ''' Definition of Block 4. 
''' def __init__(self, in_channels):", "= self.bn2(x) x = torch.relu(x) return x class Block2(nn.Module): ''' Definition of Block", "if num_middle_layers != 0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)]) self.block5 =", "(3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256, 256, (3, 3),", "x = self.block6(x) x = self.avg(x) x = x.view(x.size(0), -1) x = self.final(x)", "super(Block1, self).__init__() self.conv1 = nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32)", "stride=(2, 2), padding=1) self.out_channels = 128 def forward(self, x): # Shortcut rx =", "bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels = in_channels * depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels,", "self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, (3, 3), padding=1, bias=False) self.bn2 =", "1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3),", "nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 256 def forward(self, x): # Shortcut", "self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels) else: self.block5 =", "self.conv1(x) x = self.bn1(x) x = torch.relu(x) x = self.conv2(x) x = self.bn2(x)", "Network(nn.Module): ''' Definition of the whole network with Block[1-6] utilized. ''' def __init__(self,", "3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128, 128, (3, 3), padding=1,", "self.r_bn1 = nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 =", "(1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels, 256, (3,", "= nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(512) self.conv1 =", "__init__(self, in_channels): super(Block3, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2), bias=False)", "self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(1024) self.conv2 =", "f'Invalid number of layers, {num_middle_layers}' if num_middle_layers != 0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for", "def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x", "forward(self, x): # Shortcut rx = x # Main way x = torch.relu(x)", "512 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx) #", "x + rx return x class Block4(nn.Module): ''' Definition of Block 4. '''", "length and high x = self.block6(x) x = self.avg(x) x = x.view(x.size(0), -1)", "network with Block[1-6] utilized. 
''' def __init__(self, in_channels, num_classes, num_middle_layers=4): super(Network, self).__init__() self.block1", "padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, (3, 3), padding=1, bias=False)", "x = x + rx return x class Block3(nn.Module): ''' Definition of Block", "__init__(self, in_channels): super(Block2, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2), bias=False)", "= self.conv2(x) x = self.bn2(x) x = torch.relu(x) return x class Network(nn.Module): '''", "and high x = self.block3(x) # half-sized length and high for i in", "nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False) def forward(self, x): x = self.depthwise_conv(x) output =", "number of layers, {num_middle_layers}' if num_middle_layers != 0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _", "# Shortcut rx = x # Main way x = torch.relu(x) x =", "= self.depthwise_conv(x) output = self.pointwise_conv(x) return output class Block1(nn.Module): ''' Definition of Block", "self.bn2 = nn.BatchNorm2d(2048) self.out_channels = 2048 def forward(self, x): x = self.conv1(x) x", "Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx) # Main way x = torch.relu(x)", "nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 512 def forward(self,", "def __init__(self, in_channels): super(Block5, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2),", "num_middle_layers != 0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels)", "self.mp3(x) # Confluence x = x + rx return x class Block3(nn.Module): '''", "512, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels, 256,", "def __init__(self, in_channels, num_classes, num_middle_layers=4): super(Network, self).__init__() self.block1 = Block1(in_channels) self.block2 = Block2(self.block1.out_channels)", "= Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels, num_classes) def", "import torch.nn as nn class SeparableConv2D(nn.Module): ''' Definition of Separable Convolution. ''' def", "256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3, 3), stride=(2,", "SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256,", "def forward(self, x): # Shortcut rx = x # Main way x =", "SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024, 2048,", "nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(2048) self.out_channels", "class Block6(nn.Module): ''' Definition of Block 6. 
''' def __init__(self, in_channels): super(Block6, self).__init__()", "nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64,", "self.out_channels = 64 def forward(self, x): x = self.conv1(x) x = self.bn1(x) x", "nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels,", "rx = x # Main way x = torch.relu(x) x = self.conv1(x) x", "(1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels, 256, (3,", "256 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx) #", "= nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels, num_classes) def forward(self, x): x = self.block1(x) x", "256 def forward(self, x): # Shortcut rx = x # Main way x", "in_channels): super(Block1, self).__init__() self.conv1 = nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False) self.bn1 =", "out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels = in_channels", "* depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode)", "''' Definition of Block 2. ''' def __init__(self, in_channels): super(Block2, self).__init__() self.r_conv1 =", "nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 512 def forward(self, x): # Shortcut", "self).__init__() self.conv1 = nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2", "= nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 128 def", "= torch.relu(x) x = self.conv3(x) x = self.bn3(x) # Confluence x = x", "padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False)", "half-sized length and high for i in range(len(self.block4_lst)): x = self.block4_lst[i](x) x =", "= self.block3(x) # half-sized length and high for i in range(len(self.block4_lst)): x =", "of layers, {num_middle_layers}' if num_middle_layers != 0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in", "self.depthwise_conv(x) output = self.pointwise_conv(x) return output class Block1(nn.Module): ''' Definition of Block 1.", "= self.pointwise_conv(x) return output class Block1(nn.Module): ''' Definition of Block 1. ''' def", "''' Definition of Block 5. ''' def __init__(self, in_channels): super(Block5, self).__init__() self.r_conv1 =", "and high x = self.block6(x) x = self.avg(x) x = x.view(x.size(0), -1) x", "forward(self, x): # Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx) # Main way", "256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256, 256, (3,", "self.bn3 = nn.BatchNorm2d(256) self.out_channels = 256 def forward(self, x): # Shortcut rx =", "nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.conv3", "3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1,", "Block 4. 
''' def __init__(self, in_channels): super(Block4, self).__init__() self.conv1 = SeparableConv2D(in_channels, 256, (3,", "self.block6(x) x = self.avg(x) x = x.view(x.size(0), -1) x = self.final(x) return x", "in_channels): super(Block6, self).__init__() self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False) self.bn1 =", "= SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256,", "torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = torch.relu(x) return x class", "Confluence x = x + rx return x class Block4(nn.Module): ''' Definition of", "Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx) # Main way x = self.conv1(x)", "in range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels) else: self.block5 = Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels) self.avg", "padding=0, dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels = in_channels * depth_multiplier self.depthwise_conv =", "3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 512, (3, 3), padding=1,", "x = torch.relu(x) x = self.conv3(x) x = self.bn3(x) # Confluence x =", "2), bias=False) self.r_bn1 = nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False)", "= nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 512 def forward(self, x): #", "= torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = torch.relu(x) return x", "= nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64)", "= nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128, 128, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(128)", "nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 256 def forward(self,", "stride=(2, 2), padding=1) self.out_channels = 512 def forward(self, x): # Shortcut rx =", "self.bn2(x) x = torch.relu(x) return x class Network(nn.Module): ''' Definition of the whole", "bias=False) def forward(self, x): x = self.depthwise_conv(x) output = self.pointwise_conv(x) return output class", "4. ''' def __init__(self, in_channels): super(Block4, self).__init__() self.conv1 = SeparableConv2D(in_channels, 256, (3, 3),", "assert num_middle_layers >= 0, f'Invalid number of layers, {num_middle_layers}' if num_middle_layers != 0:", "padding=1, bias=False) self.bn2 = nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels", "1024, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024, 2048, (3,", "in_channels * depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias,", "2048, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(2048) self.out_channels = 2048 def forward(self,", "x class Block2(nn.Module): ''' Definition of Block 2. ''' def __init__(self, in_channels): super(Block2,", "super(Block4, self).__init__() self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256)", "Block[1-6] utilized. 
''' def __init__(self, in_channels, num_classes, num_middle_layers=4): super(Network, self).__init__() self.block1 = Block1(in_channels)", "super(SeparableConv2D, self).__init__() depthwise_conv_out_channels = in_channels * depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride,", "Block1(nn.Module): ''' Definition of Block 1. ''' def __init__(self, in_channels): super(Block1, self).__init__() self.conv1", "nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels,", "512, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3, 3), stride=(2,", "Shortcut rx = x # Main way x = torch.relu(x) x = self.conv1(x)", "x = self.conv2(x) x = self.bn2(x) x = torch.relu(x) x = self.conv3(x) x", "nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 512, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(512) self.mp3", "0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels) else: self.block5", "of Block 4. ''' def __init__(self, in_channels): super(Block4, self).__init__() self.conv1 = SeparableConv2D(in_channels, 256,", "super(Block3, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2), bias=False) self.r_bn1 =", "self.bn2(x) x = torch.relu(x) x = self.conv3(x) x = self.bn3(x) # Confluence x", "of Separable Convolution. ''' def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1,", "class Block1(nn.Module): ''' Definition of Block 1. ''' def __init__(self, in_channels): super(Block1, self).__init__()", "self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 128 def forward(self, x):", "Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels, num_classes) def forward(self,", "= nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 256 def forward(self, x): #", "= Block3(self.block2.out_channels) assert num_middle_layers >= 0, f'Invalid number of layers, {num_middle_layers}' if num_middle_layers", "128, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels, 128,", "and high for i in range(len(self.block4_lst)): x = self.block4_lst[i](x) x = self.block5(x) #", "Definition of Block 6. ''' def __init__(self, in_channels): super(Block6, self).__init__() self.conv1 = SeparableConv2D(in_channels,", "Block5(nn.Module): ''' Definition of Block 5. ''' def __init__(self, in_channels): super(Block5, self).__init__() self.r_conv1", "x = x + rx return x class Block4(nn.Module): ''' Definition of Block", "class Block5(nn.Module): ''' Definition of Block 5. 
''' def __init__(self, in_channels): super(Block5, self).__init__()", "num_classes) def forward(self, x): x = self.block1(x) x = self.block2(x) # half-sized length", "rx = self.r_bn1(rx) # Main way x = self.conv1(x) x = self.bn1(x) x", "{num_middle_layers}' if num_middle_layers != 0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)]) self.block5", "= torch.relu(x) return x class Network(nn.Module): ''' Definition of the whole network with", "''' def __init__(self, in_channels): super(Block3, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1), stride=(2,", "Convolution. ''' def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'):", "rx = self.r_conv1(x) rx = self.r_bn1(rx) # Main way x = self.conv1(x) x", "x): x = self.block1(x) x = self.block2(x) # half-sized length and high x", "rx return x class Block4(nn.Module): ''' Definition of Block 4. ''' def __init__(self,", "= nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False) def forward(self, x): x = self.depthwise_conv(x) output", "= self.conv2(x) x = self.bn2(x) x = self.mp3(x) # Confluence x = x", "forward(self, x): x = self.block1(x) x = self.block2(x) # half-sized length and high", "= torch.relu(x) return x class Block2(nn.Module): ''' Definition of Block 2. ''' def", "return x class Block6(nn.Module): ''' Definition of Block 6. ''' def __init__(self, in_channels):", "# half-sized length and high x = self.block6(x) x = self.avg(x) x =", "128, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128, 128, (3,", "''' def __init__(self, in_channels): super(Block6, self).__init__() self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3), padding=1,", "self).__init__() self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2", "range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels) else: self.block5 = Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels) self.avg =", "= nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 128 def forward(self, x): #", "half-sized length and high x = self.block6(x) x = self.avg(x) x = x.view(x.size(0),", "= nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32,", "x): x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x = self.conv2(x)", "Block 2. 
''' def __init__(self, in_channels): super(Block2, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 128, (1,", "= self.mp3(x) # Confluence x = x + rx return x class Block3(nn.Module):", "self.block5 = Block5(self.block4_lst[0].out_channels) else: self.block5 = Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1)", "bias=False) self.bn2 = nn.BatchNorm2d(64) self.out_channels = 64 def forward(self, x): x = self.conv1(x)", "self.block6 = Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels, num_classes) def forward(self, x):", "torch.relu(x) x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x = self.conv2(x)", "(1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels, 128, (3,", "x = self.block1(x) x = self.block2(x) # half-sized length and high x =", "torch.relu(x) return x class Block2(nn.Module): ''' Definition of Block 2. ''' def __init__(self,", "stride=1, bias=False) def forward(self, x): x = self.depthwise_conv(x) output = self.pointwise_conv(x) return output", "def __init__(self, in_channels): super(Block3, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2),", "= in_channels * depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels,", "= self.r_bn1(rx) # Main way x = self.conv1(x) x = self.bn1(x) x =", "= self.bn1(x) x = torch.relu(x) x = self.conv2(x) x = self.bn2(x) x =", "self.conv2 = SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(2048) self.out_channels =", "(3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, (3, 3),", "padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False) def forward(self, x): x =", "(3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 512, (3, 3),", "self.conv2 = nn.Conv2d(32, 64, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64) self.out_channels =", "in range(len(self.block4_lst)): x = self.block4_lst[i](x) x = self.block5(x) # half-sized length and high", "2. 
''' def __init__(self, in_channels): super(Block2, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1),", "nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2", "padding=1, bias=False) self.bn1 = nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False)", "self.block2(x) # half-sized length and high x = self.block3(x) # half-sized length and", "= nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256)", "256, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels, 256,", "torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = torch.relu(x) x = self.conv3(x)", "in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels =", "''' def __init__(self, in_channels): super(Block1, self).__init__() self.conv1 = nn.Conv2d(in_channels, 32, (3, 3), padding=1,", "bias=False) self.bn2 = nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels =", "x = self.block2(x) # half-sized length and high x = self.block3(x) # half-sized", "Block 1. ''' def __init__(self, in_channels): super(Block1, self).__init__() self.conv1 = nn.Conv2d(in_channels, 32, (3,", "out_channels, kernel_size=1, stride=1, bias=False) def forward(self, x): x = self.depthwise_conv(x) output = self.pointwise_conv(x)", "bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, (3, 3), padding=1, bias=False) self.bn2", "nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 128 def forward(self,", "Confluence x = x + rx return x class Block3(nn.Module): ''' Definition of", "depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1,", "class Block4(nn.Module): ''' Definition of Block 4. ''' def __init__(self, in_channels): super(Block4, self).__init__()", "self).__init__() depthwise_conv_out_channels = in_channels * depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding,", "def __init__(self, in_channels): super(Block1, self).__init__() self.conv1 = nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False)", "nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels,", "3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)", "64, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64) self.out_channels = 64 def forward(self,", "self.conv2(x) x = self.bn2(x) x = torch.relu(x) x = self.conv3(x) x = self.bn3(x)", "= SeparableConv2D(256, 512, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3,", "= x + rx return x class Block6(nn.Module): ''' Definition of Block 6.", "bias=False) self.bn1 = nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False) self.bn2", "(3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64) self.out_channels = 64 def forward(self, x):", "Block3(nn.Module): ''' Definition of Block 3. 
''' def __init__(self, in_channels): super(Block3, self).__init__() self.r_conv1", "Confluence x = x + rx return x class Block6(nn.Module): ''' Definition of", "padding=1) self.out_channels = 256 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx", "# Confluence x = x + rx return x class Block4(nn.Module): ''' Definition", "x = self.bn2(x) x = torch.relu(x) return x class Block2(nn.Module): ''' Definition of", "x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x = self.conv2(x) x", "nn.BatchNorm2d(64) self.out_channels = 64 def forward(self, x): x = self.conv1(x) x = self.bn1(x)", "__init__(self, in_channels): super(Block5, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2), bias=False)", "self.mp3(x) # Confluence x = x + rx return x class Block4(nn.Module): '''", "x = torch.relu(x) return x class Block2(nn.Module): ''' Definition of Block 2. '''", "Definition of Block 2. ''' def __init__(self, in_channels): super(Block2, self).__init__() self.r_conv1 = nn.Conv2d(in_channels,", "return x class Block2(nn.Module): ''' Definition of Block 2. ''' def __init__(self, in_channels):", "self).__init__() self.block1 = Block1(in_channels) self.block2 = Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels) assert num_middle_layers >=", "__init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels", "forward(self, x): x = self.depthwise_conv(x) output = self.pointwise_conv(x) return output class Block1(nn.Module): '''", "# Main way x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x", "= 512 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx)", "''' def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D,", "kernel_size, stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1,", "padding=1, bias=False) self.bn1 = nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128, 128, (3, 3), padding=1, bias=False)", "x = self.bn2(x) x = torch.relu(x) x = self.conv3(x) x = self.bn3(x) #", "forward(self, x): x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x =", "self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(128) self.conv1", "self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 =", "3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024, 2048, (3, 3), padding=1,", "x = x + rx return x class Block6(nn.Module): ''' Definition of Block", "x class Block3(nn.Module): ''' Definition of Block 3. ''' def __init__(self, in_channels): super(Block3,", "= self.block4_lst[i](x) x = self.block5(x) # half-sized length and high x = self.block6(x)", "Definition of Block 3. ''' def __init__(self, in_channels): super(Block3, self).__init__() self.r_conv1 = nn.Conv2d(in_channels,", "self.block3 = Block3(self.block2.out_channels) assert num_middle_layers >= 0, f'Invalid number of layers, {num_middle_layers}' if", "import torch import torch.nn as nn class SeparableConv2D(nn.Module): ''' Definition of Separable Convolution.", "x + rx return x class Block6(nn.Module): ''' Definition of Block 6. 
'''", "x = self.block3(x) # half-sized length and high for i in range(len(self.block4_lst)): x", "depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels = in_channels * depth_multiplier", "nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64) self.out_channels", "= x + rx return x class Block5(nn.Module): ''' Definition of Block 5.", "num_middle_layers=4): super(Network, self).__init__() self.block1 = Block1(in_channels) self.block2 = Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels) assert", "Block6(nn.Module): ''' Definition of Block 6. ''' def __init__(self, in_channels): super(Block6, self).__init__() self.conv1", "SeparableConv2D(128, 128, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3, 3),", "dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False) def forward(self,", "self.block2 = Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels) assert num_middle_layers >= 0, f'Invalid number of", "def __init__(self, in_channels): super(Block4, self).__init__() self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False)", "= nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels) else: self.block5 = Block5(self.block3.out_channels)", "self.conv2 = SeparableConv2D(256, 512, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(512) self.mp3 =", "stride=(2, 2), padding=1) self.out_channels = 256 def forward(self, x): # Shortcut rx =", "= x + rx return x class Block3(nn.Module): ''' Definition of Block 3.", "= nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 512, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(512)", "Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels) assert num_middle_layers >= 0, f'Invalid number of layers, {num_middle_layers}'", "Separable Convolution. ''' def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True,", "layers, {num_middle_layers}' if num_middle_layers != 0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)])", "= self.block6(x) x = self.avg(x) x = x.view(x.size(0), -1) x = self.final(x) return", "padding=1, bias=False) self.bn3 = nn.BatchNorm2d(256) self.out_channels = 256 def forward(self, x): # Shortcut", "bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 512, (3, 3), padding=1, bias=False) self.bn2", "stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1,", "in_channels): super(Block2, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2), bias=False) self.r_bn1", "return x class Network(nn.Module): ''' Definition of the whole network with Block[1-6] utilized.", "for i in range(len(self.block4_lst)): x = self.block4_lst[i](x) x = self.block5(x) # half-sized length", "x = self.block4_lst[i](x) x = self.block5(x) # half-sized length and high x =", "x + rx return x class Block5(nn.Module): ''' Definition of Block 5. '''", "of Block 2. 
''' def __init__(self, in_channels): super(Block2, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 128,", "256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256, (3,", "+ rx return x class Block3(nn.Module): ''' Definition of Block 3. ''' def", "def __init__(self, in_channels): super(Block2, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2),", "bias=False) self.r_bn1 = nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1", "= nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256)", "rx return x class Block5(nn.Module): ''' Definition of Block 5. ''' def __init__(self,", "= nn.BatchNorm2d(2048) self.out_channels = 2048 def forward(self, x): x = self.conv1(x) x =", "5. ''' def __init__(self, in_channels): super(Block5, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1),", "x = self.conv3(x) x = self.bn3(x) # Confluence x = x + rx", "Definition of Separable Convolution. ''' def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0,", "x = self.block5(x) # half-sized length and high x = self.block6(x) x =", "padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64) self.out_channels = 64 def forward(self, x): x =", "self.r_bn1(rx) # Main way x = torch.relu(x) x = self.conv1(x) x = self.bn1(x)", "self.bn2 = nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn3 =", "groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False) def forward(self, x):", "''' Definition of Block 4. ''' def __init__(self, in_channels): super(Block4, self).__init__() self.conv1 =", "3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, (3, 3), padding=1,", "= SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024,", "x = torch.relu(x) return x class Network(nn.Module): ''' Definition of the whole network", "padding=1) self.out_channels = 512 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx", "rx = self.r_bn1(rx) # Main way x = torch.relu(x) x = self.conv1(x) x", "bias=False) self.bn1 = nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128, 128, (3, 3), padding=1, bias=False) self.bn2", "bias=bias, padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False) def forward(self, x): x", "1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3),", "= x + rx return x class Block4(nn.Module): ''' Definition of Block 4.", "x = torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = torch.relu(x) return", "self.conv3 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn3 = nn.BatchNorm2d(256) self.out_channels =", "class Network(nn.Module): ''' Definition of the whole network with Block[1-6] utilized. ''' def", "!= 0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels) else:", "(3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256, (3, 3),", "output class Block1(nn.Module): ''' Definition of Block 1. 
''' def __init__(self, in_channels): super(Block1,", "256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 512, (3,", "bias=False) self.r_bn1 = nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False) self.bn1", "''' def __init__(self, in_channels): super(Block2, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1), stride=(2,", "128, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3, 3), stride=(2,", "= Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels, num_classes) def forward(self, x): x", "3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(2048) self.out_channels = 2048 def forward(self, x): x", "3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)", "x = self.bn1(x) x = torch.relu(x) x = self.conv2(x) x = self.bn2(x) x", "2), padding=1) self.out_channels = 256 def forward(self, x): # Shortcut rx = self.r_conv1(x)", "padding=1) self.out_channels = 128 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx", "= self.r_conv1(x) rx = self.r_bn1(rx) # Main way x = torch.relu(x) x =", "= nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(128)", "self.r_bn1 = nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False) self.bn1 =", "self.conv1 = nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 =", "range(len(self.block4_lst)): x = self.block4_lst[i](x) x = self.block5(x) # half-sized length and high x", "= self.r_bn1(rx) # Main way x = torch.relu(x) x = self.conv1(x) x =", "self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 =", "= 256 def forward(self, x): # Shortcut rx = x # Main way", "rx return x class Block3(nn.Module): ''' Definition of Block 3. ''' def __init__(self,", "torch import torch.nn as nn class SeparableConv2D(nn.Module): ''' Definition of Separable Convolution. '''", "bias=False) self.bn2 = nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels =", "in_channels, num_classes, num_middle_layers=4): super(Network, self).__init__() self.block1 = Block1(in_channels) self.block2 = Block2(self.block1.out_channels) self.block3 =", "# Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx) # Main way x =", "bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2", "torch.relu(x) return x class Network(nn.Module): ''' Definition of the whole network with Block[1-6]", "= Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels) assert num_middle_layers >= 0, f'Invalid number of layers,", "self.pointwise_conv(x) return output class Block1(nn.Module): ''' Definition of Block 1. ''' def __init__(self,", "Block1(in_channels) self.block2 = Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels) assert num_middle_layers >= 0, f'Invalid number", "nn.BatchNorm2d(2048) self.out_channels = 2048 def forward(self, x): x = self.conv1(x) x = self.bn1(x)", "x = torch.relu(x) x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x", "SeparableConv2D(nn.Module): ''' Definition of Separable Convolution. 
''' def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1,", "class SeparableConv2D(nn.Module): ''' Definition of Separable Convolution. ''' def __init__(self, in_channels, out_channels, kernel_size,", "rx = self.r_conv1(x) rx = self.r_bn1(rx) # Main way x = torch.relu(x) x", "= self.conv3(x) x = self.bn3(x) # Confluence x = x + rx return", "self.conv2 = SeparableConv2D(128, 128, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(128) self.mp3 =", "x = self.mp3(x) # Confluence x = x + rx return x class", "utilized. ''' def __init__(self, in_channels, num_classes, num_middle_layers=4): super(Network, self).__init__() self.block1 = Block1(in_channels) self.block2", "class Block2(nn.Module): ''' Definition of Block 2. ''' def __init__(self, in_channels): super(Block2, self).__init__()", "Block2(nn.Module): ''' Definition of Block 2. ''' def __init__(self, in_channels): super(Block2, self).__init__() self.r_conv1", "1. ''' def __init__(self, in_channels): super(Block1, self).__init__() self.conv1 = nn.Conv2d(in_channels, 32, (3, 3),", "self.bn2(x) x = self.mp3(x) # Confluence x = x + rx return x", "nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(128) self.conv2", "SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3, 3),", "the whole network with Block[1-6] utilized. ''' def __init__(self, in_channels, num_classes, num_middle_layers=4): super(Network,", "__init__(self, in_channels): super(Block4, self).__init__() self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1", "x class Block5(nn.Module): ''' Definition of Block 5. ''' def __init__(self, in_channels): super(Block5,", "3), stride=(2, 2), padding=1) self.out_channels = 128 def forward(self, x): # Shortcut rx", "self.mp3(x) # Confluence x = x + rx return x class Block6(nn.Module): '''", "self.block4_lst[i](x) x = self.block5(x) # half-sized length and high x = self.block6(x) x", "torch.nn as nn class SeparableConv2D(nn.Module): ''' Definition of Separable Convolution. ''' def __init__(self,", "2), padding=1) self.out_channels = 128 def forward(self, x): # Shortcut rx = self.r_conv1(x)", "class Block3(nn.Module): ''' Definition of Block 3. ''' def __init__(self, in_channels): super(Block3, self).__init__()", "of Block 6. ''' def __init__(self, in_channels): super(Block6, self).__init__() self.conv1 = SeparableConv2D(in_channels, 1024,", "self.r_bn1 = nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 =", "x): x = self.depthwise_conv(x) output = self.pointwise_conv(x) return output class Block1(nn.Module): ''' Definition", "= x # Main way x = torch.relu(x) x = self.conv1(x) x =", "= SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128,", "nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels, num_classes) def forward(self, x): x = self.block1(x) x =", "x class Network(nn.Module): ''' Definition of the whole network with Block[1-6] utilized. 
'''", "def forward(self, x): x = self.block1(x) x = self.block2(x) # half-sized length and", "= nn.BatchNorm2d(256) self.out_channels = 256 def forward(self, x): # Shortcut rx = x", "self.block5(x) # half-sized length and high x = self.block6(x) x = self.avg(x) x", "self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(256)", "stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1,", "x class Block4(nn.Module): ''' Definition of Block 4. ''' def __init__(self, in_channels): super(Block4,", "= self.mp3(x) # Confluence x = x + rx return x class Block4(nn.Module):", "nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels) else: self.block5 = Block5(self.block3.out_channels) self.block6", "= 128 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx)", "(3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(2048) self.out_channels = 2048 def forward(self, x):", "for _ in range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels) else: self.block5 = Block5(self.block3.out_channels) self.block6 =", "(3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128, 128, (3, 3),", "bias=False) self.bn2 = nn.BatchNorm2d(2048) self.out_channels = 2048 def forward(self, x): x = self.conv1(x)", "_ in range(num_middle_layers)]) self.block5 = Block5(self.block4_lst[0].out_channels) else: self.block5 = Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels)", "# Main way x = torch.relu(x) x = self.conv1(x) x = self.bn1(x) x", "torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = self.mp3(x) # Confluence x", "64 def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = torch.relu(x)", "0, f'Invalid number of layers, {num_middle_layers}' if num_middle_layers != 0: self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels)", "3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)", "# Confluence x = x + rx return x class Block6(nn.Module): ''' Definition", "3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256, 256, (3, 3), padding=1,", "= nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(128) self.conv1 =", "padding=1, bias=False) self.bn2 = nn.BatchNorm2d(2048) self.out_channels = 2048 def forward(self, x): x =", "output = self.pointwise_conv(x) return output class Block1(nn.Module): ''' Definition of Block 1. '''", "self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.mp3 =", "x = self.bn2(x) x = self.mp3(x) # Confluence x = x + rx", "of Block 3. 
''' def __init__(self, in_channels): super(Block3, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 256,", "self.conv2(x) x = self.bn2(x) x = self.mp3(x) # Confluence x = x +", "3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64) self.out_channels = 64 def forward(self, x): x", "way x = torch.relu(x) x = self.conv1(x) x = self.bn1(x) x = torch.relu(x)", "128 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx) #", "def forward(self, x): x = self.depthwise_conv(x) output = self.pointwise_conv(x) return output class Block1(nn.Module):", "bias=False) self.bn2 = nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels =", "self.bn3(x) # Confluence x = x + rx return x class Block5(nn.Module): '''", "= torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = self.mp3(x) # Confluence", "bias=False) self.r_bn1 = nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1", "= SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn3 = nn.BatchNorm2d(256) self.out_channels = 256", "= torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = torch.relu(x) x =", "''' Definition of Block 1. ''' def __init__(self, in_channels): super(Block1, self).__init__() self.conv1 =", "nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn3 = nn.BatchNorm2d(256) self.out_channels", "= Block5(self.block4_lst[0].out_channels) else: self.block5 = Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1) self.final", "stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels = in_channels * depth_multiplier self.depthwise_conv", "# Confluence x = x + rx return x class Block5(nn.Module): ''' Definition", "self.final = nn.Linear(self.block6.out_channels, num_classes) def forward(self, x): x = self.block1(x) x = self.block2(x)", "2), bias=False) self.r_bn1 = nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False)", "stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels, 128, (3, 3), padding=1,", "256, (3, 3), padding=1, bias=False) self.bn3 = nn.BatchNorm2d(256) self.out_channels = 256 def forward(self,", "= Block1(in_channels) self.block2 = Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels) assert num_middle_layers >= 0, f'Invalid", "as nn class SeparableConv2D(nn.Module): ''' Definition of Separable Convolution. 
''' def __init__(self, in_channels,", "3), stride=(2, 2), padding=1) self.out_channels = 256 def forward(self, x): # Shortcut rx", "= self.conv2(x) x = self.bn2(x) x = torch.relu(x) x = self.conv3(x) x =", "self.bn1 = nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128, 128, (3, 3), padding=1, bias=False) self.bn2 =", "''' def __init__(self, in_channels): super(Block4, self).__init__() self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1,", "self.bn2 = nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 256", "SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128, 128,", "__init__(self, in_channels): super(Block1, self).__init__() self.conv1 = nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False) self.bn1", "= nn.Linear(self.block6.out_channels, num_classes) def forward(self, x): x = self.block1(x) x = self.block2(x) #", "= SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(2048) self.out_channels = 2048", "def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__()", "__init__(self, in_channels): super(Block6, self).__init__() self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False) self.bn1", "kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels = in_channels *", "x + rx return x class Block3(nn.Module): ''' Definition of Block 3. '''", "SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(2048) self.out_channels = 2048 def", "self.bn2 = nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 512", "3. ''' def __init__(self, in_channels): super(Block3, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1),", "self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 512, (3, 3), padding=1, bias=False) self.bn2 =", "self.block1(x) x = self.block2(x) # half-sized length and high x = self.block3(x) #", "+ rx return x class Block4(nn.Module): ''' Definition of Block 4. ''' def", "def forward(self, x): # Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx) # Main", "self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(512) self.conv1", "= nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels,", "= torch.relu(x) x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x =", "(3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024, 2048, (3, 3),", "= nn.Conv2d(32, 64, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64) self.out_channels = 64", "+ rx return x class Block5(nn.Module): ''' Definition of Block 5. ''' def", "self.bn1(x) x = torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = self.mp3(x)", "Block3(self.block2.out_channels) assert num_middle_layers >= 0, f'Invalid number of layers, {num_middle_layers}' if num_middle_layers !=", "self.bn1(x) x = torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = torch.relu(x)", "of Block 5. 
''' def __init__(self, in_channels): super(Block5, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 512,", "= self.block2(x) # half-sized length and high x = self.block3(x) # half-sized length", "= nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(256) self.conv1 =", "padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 512, (3, 3), padding=1, bias=False)", "= self.bn3(x) # Confluence x = x + rx return x class Block5(nn.Module):", "self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv =", "6. ''' def __init__(self, in_channels): super(Block6, self).__init__() self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3),", "= SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3,", "in_channels): super(Block5, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2), bias=False) self.r_bn1", "(3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2),", "SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256, 256,", "Block4(nn.Module): ''' Definition of Block 4. ''' def __init__(self, in_channels): super(Block4, self).__init__() self.conv1", "super(Block2, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2), bias=False) self.r_bn1 =", "self.conv3(x) x = self.bn3(x) # Confluence x = x + rx return x", "in_channels): super(Block3, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2), bias=False) self.r_bn1", "i in range(len(self.block4_lst)): x = self.block4_lst[i](x) x = self.block5(x) # half-sized length and", ">= 0, f'Invalid number of layers, {num_middle_layers}' if num_middle_layers != 0: self.block4_lst =", "Definition of Block 1. ''' def __init__(self, in_channels): super(Block1, self).__init__() self.conv1 = nn.Conv2d(in_channels,", "Definition of the whole network with Block[1-6] utilized. ''' def __init__(self, in_channels, num_classes,", "high x = self.block6(x) x = self.avg(x) x = x.view(x.size(0), -1) x =", "= 2048 def forward(self, x): x = self.conv1(x) x = self.bn1(x) x =", "SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn3 = nn.BatchNorm2d(256) self.out_channels = 256 def", "''' Definition of Block 3. ''' def __init__(self, in_channels): super(Block3, self).__init__() self.r_conv1 =", "(3, 3), padding=1, bias=False) self.bn3 = nn.BatchNorm2d(256) self.out_channels = 256 def forward(self, x):", "= self.conv1(x) x = self.bn1(x) x = torch.relu(x) x = self.conv2(x) x =", "self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(512)", "= SeparableConv2D(128, 128, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3,", "num_classes, num_middle_layers=4): super(Network, self).__init__() self.block1 = Block1(in_channels) self.block2 = Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels)", "rx return x class Block6(nn.Module): ''' Definition of Block 6. 
''' def __init__(self,", "= SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256,", "1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(128) self.conv1 = SeparableConv2D(in_channels, 128, (3, 3),", "__init__(self, in_channels, num_classes, num_middle_layers=4): super(Network, self).__init__() self.block1 = Block1(in_channels) self.block2 = Block2(self.block1.out_channels) self.block3", "# half-sized length and high for i in range(len(self.block4_lst)): x = self.block4_lst[i](x) x", "Definition of Block 5. ''' def __init__(self, in_channels): super(Block5, self).__init__() self.r_conv1 = nn.Conv2d(in_channels,", "self.conv2(x) x = self.bn2(x) x = torch.relu(x) return x class Block2(nn.Module): ''' Definition", "nn.BatchNorm2d(512) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2", "nn.BatchNorm2d(128) self.conv2 = SeparableConv2D(128, 128, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(128) self.mp3", "length and high x = self.block3(x) # half-sized length and high for i", "padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels = in_channels * depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size,", "self.out_channels = 2048 def forward(self, x): x = self.conv1(x) x = self.bn1(x) x", "''' def __init__(self, in_channels, num_classes, num_middle_layers=4): super(Network, self).__init__() self.block1 = Block1(in_channels) self.block2 =", "self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(128)", "depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv", "= self.bn2(x) x = self.mp3(x) # Confluence x = x + rx return", "nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 128 def forward(self, x): # Shortcut", "self.avg = nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels, num_classes) def forward(self, x): x = self.block1(x)", "32, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 64, (3,", "x # Main way x = torch.relu(x) x = self.conv1(x) x = self.bn1(x)", "self.r_conv1(x) rx = self.r_bn1(rx) # Main way x = torch.relu(x) x = self.conv1(x)", "Block 6. ''' def __init__(self, in_channels): super(Block6, self).__init__() self.conv1 = SeparableConv2D(in_channels, 1024, (3,", "else: self.block5 = Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels,", "= nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256)", "nn.Linear(self.block6.out_channels, num_classes) def forward(self, x): x = self.block1(x) x = self.block2(x) # half-sized", "+ rx return x class Block6(nn.Module): ''' Definition of Block 6. 
''' def", "self.block1 = Block1(in_channels) self.block2 = Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels) assert num_middle_layers >= 0,", "nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.mp3", "= nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn3 = nn.BatchNorm2d(256)", "x = self.bn2(x) x = torch.relu(x) return x class Network(nn.Module): ''' Definition of", "x = self.conv2(x) x = self.bn2(x) x = self.mp3(x) # Confluence x =", "half-sized length and high x = self.block3(x) # half-sized length and high for", "Main way x = torch.relu(x) x = self.conv1(x) x = self.bn1(x) x =", "2), bias=False) self.r_bn1 = nn.BatchNorm2d(256) self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False)", "whole network with Block[1-6] utilized. ''' def __init__(self, in_channels, num_classes, num_middle_layers=4): super(Network, self).__init__()", "SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 512,", "x = torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = torch.relu(x) x", "self.block3(x) # half-sized length and high for i in range(len(self.block4_lst)): x = self.block4_lst[i](x)", "self.conv2(x) x = self.bn2(x) x = torch.relu(x) return x class Network(nn.Module): ''' Definition", "super(Block5, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2), bias=False) self.r_bn1 =", "2048 def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = torch.relu(x)", "self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False) def forward(self, x): x = self.depthwise_conv(x)", "= self.conv2(x) x = self.bn2(x) x = torch.relu(x) return x class Block2(nn.Module): '''", "way x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x = self.conv2(x)", "def __init__(self, in_channels): super(Block6, self).__init__() self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False)", "length and high for i in range(len(self.block4_lst)): x = self.block4_lst[i](x) x = self.block5(x)", "high for i in range(len(self.block4_lst)): x = self.block4_lst[i](x) x = self.block5(x) # half-sized", "Confluence x = x + rx return x class Block5(nn.Module): ''' Definition of", "(3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2),", "self.r_conv1(x) rx = self.r_bn1(rx) # Main way x = self.conv1(x) x = self.bn1(x)", "= self.block1(x) x = self.block2(x) # half-sized length and high x = self.block3(x)", "padding=1, bias=False) self.bn1 = nn.BatchNorm2d(256) self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False)", "self.block5 = Block5(self.block3.out_channels) self.block6 = Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels, num_classes)", "self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 512 def forward(self, x):", "x): # Shortcut rx = x # Main way x = torch.relu(x) x", "2), padding=1) self.out_channels = 512 def forward(self, x): # Shortcut rx = self.r_conv1(x)", "= nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(2048)", "(3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2),", "''' Definition of Block 6. 
''' def __init__(self, in_channels): super(Block6, self).__init__() self.conv1 =", "in_channels): super(Block4, self).__init__() self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False) self.bn1 =", "nn class SeparableConv2D(nn.Module): ''' Definition of Separable Convolution. ''' def __init__(self, in_channels, out_channels,", "# Confluence x = x + rx return x class Block3(nn.Module): ''' Definition", "super(Network, self).__init__() self.block1 = Block1(in_channels) self.block2 = Block2(self.block1.out_channels) self.block3 = Block3(self.block2.out_channels) assert num_middle_layers", "padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels", "self).__init__() self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(1024) self.conv2", "x class Block6(nn.Module): ''' Definition of Block 6. ''' def __init__(self, in_channels): super(Block6,", "= self.r_conv1(x) rx = self.r_bn1(rx) # Main way x = self.conv1(x) x =", "of the whole network with Block[1-6] utilized. ''' def __init__(self, in_channels, num_classes, num_middle_layers=4):", "3), stride=(2, 2), padding=1) self.out_channels = 512 def forward(self, x): # Shortcut rx", "= nn.BatchNorm2d(64) self.out_channels = 64 def forward(self, x): x = self.conv1(x) x =", "self.out_channels = 256 def forward(self, x): # Shortcut rx = x # Main", "= self.bn2(x) x = torch.relu(x) return x class Network(nn.Module): ''' Definition of the", "return x class Block3(nn.Module): ''' Definition of Block 3. ''' def __init__(self, in_channels):", "Definition of Block 4. ''' def __init__(self, in_channels): super(Block4, self).__init__() self.conv1 = SeparableConv2D(in_channels,", "self.bn2(x) x = torch.relu(x) return x class Block2(nn.Module): ''' Definition of Block 2.", "self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(256) self.conv3 =", "self.out_channels = 256 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx =", "x = torch.relu(x) x = self.conv2(x) x = self.bn2(x) x = self.mp3(x) #", "x = self.depthwise_conv(x) output = self.pointwise_conv(x) return output class Block1(nn.Module): ''' Definition of", "torch.relu(x) x = self.conv3(x) x = self.bn3(x) # Confluence x = x +", "# half-sized length and high x = self.block3(x) # half-sized length and high", "padding=1, bias=False) self.bn2 = nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels", "self.bn1 = nn.BatchNorm2d(1024) self.conv2 = SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False) self.bn2 =", "= self.block5(x) # half-sized length and high x = self.block6(x) x = self.avg(x)", "depthwise_conv_out_channels = in_channels * depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation,", "= 64 def forward(self, x): x = self.conv1(x) x = self.bn1(x) x =", "stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode) self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False)", "bias=False) self.bn3 = nn.BatchNorm2d(256) self.out_channels = 256 def forward(self, x): # Shortcut rx", "return x class Block5(nn.Module): ''' Definition of Block 5. 
''' def __init__(self, in_channels):", "x = x + rx return x class Block5(nn.Module): ''' Definition of Block", "= nn.BatchNorm2d(256) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 256 def", "''' Definition of the whole network with Block[1-6] utilized. ''' def __init__(self, in_channels,", "super(Block6, self).__init__() self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False) self.bn1 = nn.BatchNorm2d(1024)", "bias=False) self.bn2 = nn.BatchNorm2d(256) self.conv3 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False) self.bn3", "Block 5. ''' def __init__(self, in_channels): super(Block5, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 512, (1,", "x): # Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx) # Main way x", "= 256 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx = self.r_bn1(rx)", "3), padding=1, bias=False) self.bn3 = nn.BatchNorm2d(256) self.out_channels = 256 def forward(self, x): #", "self.bn2 = nn.BatchNorm2d(64) self.out_channels = 64 def forward(self, x): x = self.conv1(x) x", "dilation=1, bias=True, padding_mode='zeros'): super(SeparableConv2D, self).__init__() depthwise_conv_out_channels = in_channels * depth_multiplier self.depthwise_conv = nn.Conv2d(in_channels,", "Main way x = self.conv1(x) x = self.bn1(x) x = torch.relu(x) x =", "= self.bn2(x) x = torch.relu(x) x = self.conv3(x) x = self.bn3(x) # Confluence", "x = self.bn3(x) # Confluence x = x + rx return x class", "num_middle_layers >= 0, f'Invalid number of layers, {num_middle_layers}' if num_middle_layers != 0: self.block4_lst", "x = self.conv2(x) x = self.bn2(x) x = torch.relu(x) return x class Network(nn.Module):", "of Block 1. ''' def __init__(self, in_channels): super(Block1, self).__init__() self.conv1 = nn.Conv2d(in_channels, 32,", "self.out_channels = 128 def forward(self, x): # Shortcut rx = self.r_conv1(x) rx =", "self.bn2 = nn.BatchNorm2d(128) self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1) self.out_channels = 128", "''' def __init__(self, in_channels): super(Block5, self).__init__() self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1), stride=(2,", "self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2), bias=False) self.r_bn1 = nn.BatchNorm2d(256) self.conv1", "SeparableConv2D(256, 512, (3, 3), padding=1, bias=False) self.bn2 = nn.BatchNorm2d(512) self.mp3 = nn.MaxPool2d((3, 3),", "high x = self.block3(x) # half-sized length and high for i in range(len(self.block4_lst)):", "x = self.conv2(x) x = self.bn2(x) x = torch.relu(x) return x class Block2(nn.Module):", "''' Definition of Separable Convolution. ''' def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1,", "Block6(self.block5.out_channels) self.avg = nn.AdaptiveAvgPool2d(1) self.final = nn.Linear(self.block6.out_channels, num_classes) def forward(self, x): x =" ]
[ "utf-8 -*- from __future__ import unicode_literals from django.apps import AppConfig class TaskConfig(AppConfig): name", "-*- from __future__ import unicode_literals from django.apps import AppConfig class TaskConfig(AppConfig): name =", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.apps import AppConfig", "from __future__ import unicode_literals from django.apps import AppConfig class TaskConfig(AppConfig): name = 'task'", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django.apps import AppConfig class", "coding: utf-8 -*- from __future__ import unicode_literals from django.apps import AppConfig class TaskConfig(AppConfig):" ]
[ "name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for entry in append_by_name_meta_data: # for extension in meta_data_extensions:", "name)): if count % 100 == 0: printTime(count, now) #i+=1 #if i >", "meta_data_list.append(entry+\"/\"+meta_data) #for entry in append_by_name_meta_data: # for extension in meta_data_extensions: # for filename", "entry in append_by_name_meta_data: # for extension in meta_data_extensions: # for filename in utils.getFileIfExistFrom(entry,", "print(\"eiiii youuuuu \") row_list = [] for i in range(len(folders)): path = folders[i]", "import utils import os import pandas as pd import units.unit as unit import", "jit @jit(cache=False, forceobj = True) def getMetaData(append_by_name_meta_data, meta_data_extensions): out = [] for entry", "if not name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name", "filename in utils.getFileIfExistFrom(entry, img_name + extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for (entry, extension) in", "a problem with bmp header that won't load on some libraries but with", "prevent_load_errors: shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is not None: if shape == (1,1,3):", "None: if shape == (1,1,3): load_errors.append(path+\"/\"+name) continue else: if imgext not in image_extensions:", "@jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors): print(folders) now = time.time() print(\"eiiii", "time from numba import njit, jit @jit(cache=False, forceobj = True) def getMetaData(append_by_name_meta_data, meta_data_extensions):", "import pandas as pd import units.unit as unit import time from numba import", "entry in self.append_by_name_meta_data: # for meta_data in utils.getFileNamesFrom(entry): # name_meta, ext = os.path.splitext(meta_data)", "import time from numba import njit, jit @jit(cache=False, forceobj = True) def getMetaData(append_by_name_meta_data,", "[] for i in range(len(folders)): path = folders[i] print(\"new path:\") print(path) count =", "utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is not None: if shape == (1,1,3): load_errors.append(path+\"/\"+name) continue else:", "extension) if filename: meta_data_list.append(entry+\"/\"+filename) shape = (1,1) neww = [path, path+\"/\"+name, name, shape[0]", "folders[i] print(\"new path:\") print(path) count = 0 for name in os.listdir(path): if os.path.isfile(os.path.join(path,", "pd.Series(neww) #print(neww) count += 1 row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except", "+ extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for (entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions): filename =", "imageio import utils import os import pandas as pd import units.unit as unit", "in utils.getFileIfExistFrom(entry, img_name + extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for (entry, extension) in getMetaData(append_by_name_meta_data,", "path = folders[i] print(\"new path:\") print(path) count = 0 for name in os.listdir(path):", "return tuple(out) def printTime(length, now): print(length) print(\"Ellapsed: \" +str( time.time()-now)) @jit(cache=True) def 
loadDataframe(image_extensions,", "(entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions): filename = utils.getFileIfExistFrom(entry, img_name + extension) if filename:", "pd import units.unit as unit import time from numba import njit, jit @jit(cache=False,", "if img_name == name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for entry in append_by_name_meta_data: # for", "#print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name = path+\"-\"+name #if", "append_by_name_meta_data: for extension in meta_data_extensions: out.append((entry, extension)) return tuple(out) def printTime(length, now): print(length)", "#i+=1 #if i > 5: # break img_name, imgext = os.path.splitext(name) shape= (1,1)", "images if there is a problem with bmp header that won't load on", "path+\"/\"+name, name, shape[0] * shape[1], shape[1], shape[0], meta_data_list] #row_df = pd.Series(neww) #print(neww) count", "entry in append_by_name_meta_data: for extension in meta_data_extensions: out.append((entry, extension)) return tuple(out) def printTime(length,", "os import pandas as pd import units.unit as unit import time from numba", "in os.listdir(path): if os.path.isfile(os.path.join(path, name)): if count % 100 == 0: printTime(count, now)", "#if i > 5: # break img_name, imgext = os.path.splitext(name) shape= (1,1) if", "imgext not in image_extensions: print(\"Not a file image: \", name) continue #print(path+\"/\"+name) #print(\"path:", "for entry in append_by_name_meta_data: for extension in meta_data_extensions: out.append((entry, extension)) return tuple(out) def", "#if prepend_category: # if not name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath =", "#print(\"path: \"+path) #savepath = context.origin+\"/\"+path+\"/\"+name #if prepend_category: # if not name.startswith(path): #print(\"preprend\") #", "if there is a problem with bmp header that won't load on some", "in getMetaData(append_by_name_meta_data, meta_data_extensions): filename = utils.getFileIfExistFrom(entry, img_name + extension) if filename: meta_data_list.append(entry+\"/\"+filename) shape", "utils.getFileNamesFrom(entry): # name_meta, ext = os.path.splitext(meta_data) # if img_name == name_meta: #print(meta_data) #", "imageio.imwrite(savepath, img) meta_data_list = [] #for entry in self.append_by_name_meta_data: # for meta_data in", "def getMetaData(append_by_name_meta_data, meta_data_extensions): out = [] for entry in append_by_name_meta_data: for extension in", "= [] for i in range(len(folders)): path = folders[i] print(\"new path:\") print(path) count", "os.path.splitext(name) shape= (1,1) if prevent_load_errors: shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is not None:", "header that won't load on some libraries but with imageio # imageio.imwrite(savepath, img)", "# os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name = path+\"-\"+name #if fix_bmp:", "savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name = path+\"-\"+name #if fix_bmp: #resave images if there", "# for extension in meta_data_extensions: # for filename in utils.getFileIfExistFrom(entry, img_name + extension):", "i in range(len(folders)): path = folders[i] 
print(\"new path:\") print(path) count = 0 for", "path:\") print(path) count = 0 for name in os.listdir(path): if os.path.isfile(os.path.join(path, name)): if", "#df.loc[len(df.index)]= neww #except Exception as e: # print(e) # raise #print(context.origin+\"/\"+path+\"/\"+name) print(\"loaded: \",", "shape[1], shape[0], meta_data_list] #row_df = pd.Series(neww) #print(neww) count += 1 row_list.append(neww) #df.append(row_df, ignore_index=True)", "100 == 0: printTime(count, now) #i+=1 #if i > 5: # break img_name,", "forceobj = True) def getMetaData(append_by_name_meta_data, meta_data_extensions): out = [] for entry in append_by_name_meta_data:", "self.append_by_name_meta_data: # for meta_data in utils.getFileNamesFrom(entry): # name_meta, ext = os.path.splitext(meta_data) # if", "for extension in meta_data_extensions: out.append((entry, extension)) return tuple(out) def printTime(length, now): print(length) print(\"Ellapsed:", "context.origin+\"/\"+path+\"/\"+name #if prepend_category: # if not name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath", "if os.path.isfile(os.path.join(path, name)): if count % 100 == 0: printTime(count, now) #i+=1 #if", "#if fix_bmp: #resave images if there is a problem with bmp header that", "#resave images if there is a problem with bmp header that won't load", "= context.origin+\"/\"+path+\"/\"+name #if prepend_category: # if not name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) #", "#print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for entry in append_by_name_meta_data: # for extension in meta_data_extensions: #", "meta_data_extensions): filename = utils.getFileIfExistFrom(entry, img_name + extension) if filename: meta_data_list.append(entry+\"/\"+filename) shape = (1,1)", "now): print(length) print(\"Ellapsed: \" +str( time.time()-now)) @jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions,", "name = path+\"-\"+name #if fix_bmp: #resave images if there is a problem with", "on some libraries but with imageio # imageio.imwrite(savepath, img) meta_data_list = [] #for", "print(\"Ellapsed: \" +str( time.time()-now)) @jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors): print(folders)", "shape == (1,1,3): load_errors.append(path+\"/\"+name) continue else: if imgext not in image_extensions: print(\"Not a", "print(folders) now = time.time() print(\"eiiii youuuuu \") row_list = [] for i in", "a file image: \", name) continue #print(path+\"/\"+name) #print(\"path: \"+path) #savepath = context.origin+\"/\"+path+\"/\"+name #if", "continue else: if imgext not in image_extensions: print(\"Not a file image: \", name)", "# meta_data_list.append(entry+\"/\"+meta_data) #for entry in append_by_name_meta_data: # for extension in meta_data_extensions: # for", "name, shape[0] * shape[1], shape[1], shape[0], meta_data_list] #row_df = pd.Series(neww) #print(neww) count +=", "getMetaData(append_by_name_meta_data, meta_data_extensions): out = [] for entry in append_by_name_meta_data: for extension in meta_data_extensions:", "= context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name = path+\"-\"+name #if fix_bmp: #resave images if there is", "row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df) 
#df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except Exception as e: # print(e)", "+str( time.time()-now)) @jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors): print(folders) now =", "== 0: printTime(count, now) #i+=1 #if i > 5: # break img_name, imgext", "printTime(count, now) #i+=1 #if i > 5: # break img_name, imgext = os.path.splitext(name)", "prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors): print(folders) now = time.time() print(\"eiiii youuuuu \") row_list", "for filename in utils.getFileIfExistFrom(entry, img_name + extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for (entry, extension)", "utils import os import pandas as pd import units.unit as unit import time", "= utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is not None: if shape == (1,1,3): load_errors.append(path+\"/\"+name) continue", "in meta_data_extensions: # for filename in utils.getFileIfExistFrom(entry, img_name + extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename)", "extension in meta_data_extensions: out.append((entry, extension)) return tuple(out) def printTime(length, now): print(length) print(\"Ellapsed: \"", "load_errors is not None: if shape == (1,1,3): load_errors.append(path+\"/\"+name) continue else: if imgext", "# for meta_data in utils.getFileNamesFrom(entry): # name_meta, ext = os.path.splitext(meta_data) # if img_name", "for extension in meta_data_extensions: # for filename in utils.getFileIfExistFrom(entry, img_name + extension): #print(entry+\"/\"+filename)", "# imageio.imwrite(savepath, img) meta_data_list = [] #for entry in self.append_by_name_meta_data: # for meta_data", "with imageio # imageio.imwrite(savepath, img) meta_data_list = [] #for entry in self.append_by_name_meta_data: #", "# if img_name == name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for entry in append_by_name_meta_data: #", "filename = utils.getFileIfExistFrom(entry, img_name + extension) if filename: meta_data_list.append(entry+\"/\"+filename) shape = (1,1) neww", "imgext = os.path.splitext(name) shape= (1,1) if prevent_load_errors: shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is", "shape= (1,1) if prevent_load_errors: shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is not None: if", "+= 1 row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except Exception as e:", "continue #print(path+\"/\"+name) #print(\"path: \"+path) #savepath = context.origin+\"/\"+path+\"/\"+name #if prepend_category: # if not name.startswith(path):", "[path, path+\"/\"+name, name, shape[0] * shape[1], shape[1], shape[0], meta_data_list] #row_df = pd.Series(neww) #print(neww)", "def printTime(length, now): print(length) print(\"Ellapsed: \" +str( time.time()-now)) @jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors, folders,", "image_extensions: print(\"Not a file image: \", name) continue #print(path+\"/\"+name) #print(\"path: \"+path) #savepath =", "os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name = path+\"-\"+name #if fix_bmp: #resave", "os.listdir(path): if os.path.isfile(os.path.join(path, name)): if count % 100 == 0: 
printTime(count, now) #i+=1", "os.path.isfile(os.path.join(path, name)): if count % 100 == 0: printTime(count, now) #i+=1 #if i", "fix_bmp: #resave images if there is a problem with bmp header that won't", "there is a problem with bmp header that won't load on some libraries", "meta_data_list.append(entry+\"/\"+filename) shape = (1,1) neww = [path, path+\"/\"+name, name, shape[0] * shape[1], shape[1],", "utils.getFileIfExistFrom(entry, img_name + extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for (entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions):", "meta_data_list] #row_df = pd.Series(neww) #print(neww) count += 1 row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df) #df.reset_index(drop=True)", "count = 0 for name in os.listdir(path): if os.path.isfile(os.path.join(path, name)): if count %", "from numba import njit, jit @jit(cache=False, forceobj = True) def getMetaData(append_by_name_meta_data, meta_data_extensions): out", "@jit(cache=False, forceobj = True) def getMetaData(append_by_name_meta_data, meta_data_extensions): out = [] for entry in", "meta_data_extensions): out = [] for entry in append_by_name_meta_data: for extension in meta_data_extensions: out.append((entry,", "extension)) return tuple(out) def printTime(length, now): print(length) print(\"Ellapsed: \" +str( time.time()-now)) @jit(cache=True) def", "is a problem with bmp header that won't load on some libraries but", "pandas as pd import units.unit as unit import time from numba import njit,", "img_name == name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for entry in append_by_name_meta_data: # for extension", "#for entry in self.append_by_name_meta_data: # for meta_data in utils.getFileNamesFrom(entry): # name_meta, ext =", "problem with bmp header that won't load on some libraries but with imageio", "meta_data_extensions, load_errors): print(folders) now = time.time() print(\"eiiii youuuuu \") row_list = [] for", "extension) in getMetaData(append_by_name_meta_data, meta_data_extensions): filename = utils.getFileIfExistFrom(entry, img_name + extension) if filename: meta_data_list.append(entry+\"/\"+filename)", "name in os.listdir(path): if os.path.isfile(os.path.join(path, name)): if count % 100 == 0: printTime(count,", "shape = (1,1) neww = [path, path+\"/\"+name, name, shape[0] * shape[1], shape[1], shape[0],", "neww = [path, path+\"/\"+name, name, shape[0] * shape[1], shape[1], shape[0], meta_data_list] #row_df =", "count % 100 == 0: printTime(count, now) #i+=1 #if i > 5: #", "img) meta_data_list = [] #for entry in self.append_by_name_meta_data: # for meta_data in utils.getFileNamesFrom(entry):", "row_list = [] for i in range(len(folders)): path = folders[i] print(\"new path:\") print(path)", "# break img_name, imgext = os.path.splitext(name) shape= (1,1) if prevent_load_errors: shape = utils.checkImageFromFileGetShape(path+\"/\"+name)", "in meta_data_extensions: out.append((entry, extension)) return tuple(out) def printTime(length, now): print(length) print(\"Ellapsed: \" +str(", "0 for name in os.listdir(path): if os.path.isfile(os.path.join(path, name)): if count % 100 ==", "out = [] for entry in append_by_name_meta_data: for extension in meta_data_extensions: out.append((entry, extension))", "meta_data_extensions: # for filename in utils.getFileIfExistFrom(entry, img_name + extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for", "#row_df = pd.Series(neww) 
#print(neww) count += 1 row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]=", "\" +str( time.time()-now)) @jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors): print(folders) now", "#df.append(row_df, ignore_index=True) #print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except Exception as e: # print(e) #", "count += 1 row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except Exception as", "bmp header that won't load on some libraries but with imageio # imageio.imwrite(savepath,", "else: if imgext not in image_extensions: print(\"Not a file image: \", name) continue", "extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for (entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions): filename = utils.getFileIfExistFrom(entry,", "load_errors.append(path+\"/\"+name) continue else: if imgext not in image_extensions: print(\"Not a file image: \",", "if shape == (1,1,3): load_errors.append(path+\"/\"+name) continue else: if imgext not in image_extensions: print(\"Not", "print(length) print(\"Ellapsed: \" +str( time.time()-now)) @jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors):", "in self.append_by_name_meta_data: # for meta_data in utils.getFileNamesFrom(entry): # name_meta, ext = os.path.splitext(meta_data) #", "in append_by_name_meta_data: for extension in meta_data_extensions: out.append((entry, extension)) return tuple(out) def printTime(length, now):", "# name_meta, ext = os.path.splitext(meta_data) # if img_name == name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data)", "os.path.splitext(meta_data) # if img_name == name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for entry in append_by_name_meta_data:", "path+\"-\"+name #if fix_bmp: #resave images if there is a problem with bmp header", "#print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except Exception as e: # print(e) # raise #print(context.origin+\"/\"+path+\"/\"+name)", "= os.path.splitext(meta_data) # if img_name == name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for entry in", "import imageio import utils import os import pandas as pd import units.unit as", "some libraries but with imageio # imageio.imwrite(savepath, img) meta_data_list = [] #for entry", "shape[1], shape[1], shape[0], meta_data_list] #row_df = pd.Series(neww) #print(neww) count += 1 row_list.append(neww) #df.append(row_df,", "0: printTime(count, now) #i+=1 #if i > 5: # break img_name, imgext =", "for i in range(len(folders)): path = folders[i] print(\"new path:\") print(path) count = 0", "name) continue #print(path+\"/\"+name) #print(\"path: \"+path) #savepath = context.origin+\"/\"+path+\"/\"+name #if prepend_category: # if not", "won't load on some libraries but with imageio # imageio.imwrite(savepath, img) meta_data_list =", "name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name = path+\"-\"+name", "shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is not None: if shape == (1,1,3): 
load_errors.append(path+\"/\"+name)", "= pd.Series(neww) #print(neww) count += 1 row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]= neww", "not None: if shape == (1,1,3): load_errors.append(path+\"/\"+name) continue else: if imgext not in", "getMetaData(append_by_name_meta_data, meta_data_extensions): filename = utils.getFileIfExistFrom(entry, img_name + extension) if filename: meta_data_list.append(entry+\"/\"+filename) shape =", "for meta_data in utils.getFileNamesFrom(entry): # name_meta, ext = os.path.splitext(meta_data) # if img_name ==", "in range(len(folders)): path = folders[i] print(\"new path:\") print(path) count = 0 for name", "\", name) continue #print(path+\"/\"+name) #print(\"path: \"+path) #savepath = context.origin+\"/\"+path+\"/\"+name #if prepend_category: # if", "[] for entry in append_by_name_meta_data: for extension in meta_data_extensions: out.append((entry, extension)) return tuple(out)", "#print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for (entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions): filename = utils.getFileIfExistFrom(entry, img_name", "5: # break img_name, imgext = os.path.splitext(name) shape= (1,1) if prevent_load_errors: shape =", "= time.time() print(\"eiiii youuuuu \") row_list = [] for i in range(len(folders)): path", "if load_errors is not None: if shape == (1,1,3): load_errors.append(path+\"/\"+name) continue else: if", "def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors): print(folders) now = time.time() print(\"eiiii youuuuu", "= True) def getMetaData(append_by_name_meta_data, meta_data_extensions): out = [] for entry in append_by_name_meta_data: for", "image: \", name) continue #print(path+\"/\"+name) #print(\"path: \"+path) #savepath = context.origin+\"/\"+path+\"/\"+name #if prepend_category: #", "context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name = path+\"-\"+name #if fix_bmp: #resave images", "#for entry in append_by_name_meta_data: # for extension in meta_data_extensions: # for filename in", "+ extension) if filename: meta_data_list.append(entry+\"/\"+filename) shape = (1,1) neww = [path, path+\"/\"+name, name,", "print(\"Not a file image: \", name) continue #print(path+\"/\"+name) #print(\"path: \"+path) #savepath = context.origin+\"/\"+path+\"/\"+name", "(1,1) neww = [path, path+\"/\"+name, name, shape[0] * shape[1], shape[1], shape[0], meta_data_list] #row_df", "as pd import units.unit as unit import time from numba import njit, jit", "prepend_category: # if not name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name", "as unit import time from numba import njit, jit @jit(cache=False, forceobj = True)", "in image_extensions: print(\"Not a file image: \", name) continue #print(path+\"/\"+name) #print(\"path: \"+path) #savepath", "\") row_list = [] for i in range(len(folders)): path = folders[i] print(\"new path:\")", "# savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name = path+\"-\"+name #if fix_bmp: #resave images if", "append_by_name_meta_data: # for extension in meta_data_extensions: # for filename in utils.getFileIfExistFrom(entry, img_name +", "= os.path.splitext(name) shape= (1,1) if prevent_load_errors: 
shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is not", "Exception as e: # print(e) # raise #print(context.origin+\"/\"+path+\"/\"+name) print(\"loaded: \", len(row_list)) print(load_errors) return", "#savepath = context.origin+\"/\"+path+\"/\"+name #if prepend_category: # if not name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name)", "# meta_data_list.append(entry+\"/\"+filename) for (entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions): filename = utils.getFileIfExistFrom(entry, img_name +", "extension in meta_data_extensions: # for filename in utils.getFileIfExistFrom(entry, img_name + extension): #print(entry+\"/\"+filename) #", "folders, append_by_name_meta_data, meta_data_extensions, load_errors): print(folders) now = time.time() print(\"eiiii youuuuu \") row_list =", "loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors): print(folders) now = time.time() print(\"eiiii youuuuu \")", "now) #i+=1 #if i > 5: # break img_name, imgext = os.path.splitext(name) shape=", "out.append((entry, extension)) return tuple(out) def printTime(length, now): print(length) print(\"Ellapsed: \" +str( time.time()-now)) @jit(cache=True)", "# for filename in utils.getFileIfExistFrom(entry, img_name + extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for (entry,", "% 100 == 0: printTime(count, now) #i+=1 #if i > 5: # break", "* shape[1], shape[1], shape[0], meta_data_list] #row_df = pd.Series(neww) #print(neww) count += 1 row_list.append(neww)", "time.time()-now)) @jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data, meta_data_extensions, load_errors): print(folders) now = time.time()", "True) def getMetaData(append_by_name_meta_data, meta_data_extensions): out = [] for entry in append_by_name_meta_data: for extension", "with bmp header that won't load on some libraries but with imageio #", "load on some libraries but with imageio # imageio.imwrite(savepath, img) meta_data_list = []", "[] #for entry in self.append_by_name_meta_data: # for meta_data in utils.getFileNamesFrom(entry): # name_meta, ext", "meta_data_list.append(entry+\"/\"+filename) for (entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions): filename = utils.getFileIfExistFrom(entry, img_name + extension)", "time.time() print(\"eiiii youuuuu \") row_list = [] for i in range(len(folders)): path =", "load_errors): print(folders) now = time.time() print(\"eiiii youuuuu \") row_list = [] for i", "== (1,1,3): load_errors.append(path+\"/\"+name) continue else: if imgext not in image_extensions: print(\"Not a file", "utils.getFileIfExistFrom(entry, img_name + extension) if filename: meta_data_list.append(entry+\"/\"+filename) shape = (1,1) neww = [path,", "libraries but with imageio # imageio.imwrite(savepath, img) meta_data_list = [] #for entry in", "img_name, imgext = os.path.splitext(name) shape= (1,1) if prevent_load_errors: shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors", "youuuuu \") row_list = [] for i in range(len(folders)): path = folders[i] print(\"new", "but with imageio # imageio.imwrite(savepath, img) meta_data_list = [] #for entry in self.append_by_name_meta_data:", "filename: meta_data_list.append(entry+\"/\"+filename) shape = (1,1) neww = [path, path+\"/\"+name, name, shape[0] * 
shape[1],", "#df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except Exception as e: # print(e) # raise #print(context.origin+\"/\"+path+\"/\"+name) print(\"loaded:", "1 row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except Exception as e: #", "# name = path+\"-\"+name #if fix_bmp: #resave images if there is a problem", "= 0 for name in os.listdir(path): if os.path.isfile(os.path.join(path, name)): if count % 100", "#except Exception as e: # print(e) # raise #print(context.origin+\"/\"+path+\"/\"+name) print(\"loaded: \", len(row_list)) print(load_errors)", "shape[0], meta_data_list] #row_df = pd.Series(neww) #print(neww) count += 1 row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df)", "(1,1,3): load_errors.append(path+\"/\"+name) continue else: if imgext not in image_extensions: print(\"Not a file image:", "if filename: meta_data_list.append(entry+\"/\"+filename) shape = (1,1) neww = [path, path+\"/\"+name, name, shape[0] *", "printTime(length, now): print(length) print(\"Ellapsed: \" +str( time.time()-now)) @jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors, folders, append_by_name_meta_data,", "break img_name, imgext = os.path.splitext(name) shape= (1,1) if prevent_load_errors: shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if", "imageio # imageio.imwrite(savepath, img) meta_data_list = [] #for entry in self.append_by_name_meta_data: # for", "name_meta, ext = os.path.splitext(meta_data) # if img_name == name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for", "shape[0] * shape[1], shape[1], shape[0], meta_data_list] #row_df = pd.Series(neww) #print(neww) count += 1", "<filename>units/jit_optimizations/image_loader_to_dataframe.py import imageio import utils import os import pandas as pd import units.unit", "units.unit as unit import time from numba import njit, jit @jit(cache=False, forceobj =", "= [] for entry in append_by_name_meta_data: for extension in meta_data_extensions: out.append((entry, extension)) return", "tuple(out) def printTime(length, now): print(length) print(\"Ellapsed: \" +str( time.time()-now)) @jit(cache=True) def loadDataframe(image_extensions, prevent_load_errors,", "# if not name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name #", "print(path) count = 0 for name in os.listdir(path): if os.path.isfile(os.path.join(path, name)): if count", "= path+\"-\"+name #if fix_bmp: #resave images if there is a problem with bmp", "print(\"new path:\") print(path) count = 0 for name in os.listdir(path): if os.path.isfile(os.path.join(path, name)):", "= folders[i] print(\"new path:\") print(path) count = 0 for name in os.listdir(path): if", "if imgext not in image_extensions: print(\"Not a file image: \", name) continue #print(path+\"/\"+name)", "= [] #for entry in self.append_by_name_meta_data: # for meta_data in utils.getFileNamesFrom(entry): # name_meta,", "ext = os.path.splitext(meta_data) # if img_name == name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for entry", "in append_by_name_meta_data: # for extension in meta_data_extensions: # for filename in utils.getFileIfExistFrom(entry, img_name", "file image: \", name) continue #print(path+\"/\"+name) #print(\"path: \"+path) #savepath = context.origin+\"/\"+path+\"/\"+name #if 
prepend_category:", "numba import njit, jit @jit(cache=False, forceobj = True) def getMetaData(append_by_name_meta_data, meta_data_extensions): out =", "append_by_name_meta_data, meta_data_extensions, load_errors): print(folders) now = time.time() print(\"eiiii youuuuu \") row_list = []", "in utils.getFileNamesFrom(entry): # name_meta, ext = os.path.splitext(meta_data) # if img_name == name_meta: #print(meta_data)", "not in image_extensions: print(\"Not a file image: \", name) continue #print(path+\"/\"+name) #print(\"path: \"+path)", "import njit, jit @jit(cache=False, forceobj = True) def getMetaData(append_by_name_meta_data, meta_data_extensions): out = []", "meta_data_extensions: out.append((entry, extension)) return tuple(out) def printTime(length, now): print(length) print(\"Ellapsed: \" +str( time.time()-now))", "\"+path) #savepath = context.origin+\"/\"+path+\"/\"+name #if prepend_category: # if not name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name,", "unit import time from numba import njit, jit @jit(cache=False, forceobj = True) def", "img_name + extension): #print(entry+\"/\"+filename) # meta_data_list.append(entry+\"/\"+filename) for (entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions): filename", "meta_data_list = [] #for entry in self.append_by_name_meta_data: # for meta_data in utils.getFileNamesFrom(entry): #", "import units.unit as unit import time from numba import njit, jit @jit(cache=False, forceobj", "is not None: if shape == (1,1,3): load_errors.append(path+\"/\"+name) continue else: if imgext not", "= [path, path+\"/\"+name, name, shape[0] * shape[1], shape[1], shape[0], meta_data_list] #row_df = pd.Series(neww)", "as e: # print(e) # raise #print(context.origin+\"/\"+path+\"/\"+name) print(\"loaded: \", len(row_list)) print(load_errors) return row_list", "i > 5: # break img_name, imgext = os.path.splitext(name) shape= (1,1) if prevent_load_errors:", "= (1,1) neww = [path, path+\"/\"+name, name, shape[0] * shape[1], shape[1], shape[0], meta_data_list]", "ignore_index=True) #print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except Exception as e: # print(e) # raise", "neww #except Exception as e: # print(e) # raise #print(context.origin+\"/\"+path+\"/\"+name) print(\"loaded: \", len(row_list))", "meta_data in utils.getFileNamesFrom(entry): # name_meta, ext = os.path.splitext(meta_data) # if img_name == name_meta:", "if prevent_load_errors: shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is not None: if shape ==", "= utils.getFileIfExistFrom(entry, img_name + extension) if filename: meta_data_list.append(entry+\"/\"+filename) shape = (1,1) neww =", "range(len(folders)): path = folders[i] print(\"new path:\") print(path) count = 0 for name in", "#print(path+\"/\"+name) #print(\"path: \"+path) #savepath = context.origin+\"/\"+path+\"/\"+name #if prepend_category: # if not name.startswith(path): #print(\"preprend\")", "(1,1) if prevent_load_errors: shape = utils.checkImageFromFileGetShape(path+\"/\"+name) if load_errors is not None: if shape", "== name_meta: #print(meta_data) # meta_data_list.append(entry+\"/\"+meta_data) #for entry in append_by_name_meta_data: # for extension in", "for (entry, extension) in getMetaData(append_by_name_meta_data, meta_data_extensions): filename = utils.getFileIfExistFrom(entry, img_name + extension) if", "now = time.time() print(\"eiiii youuuuu \") row_list = [] for i in range(len(folders)):", "> 5: # break img_name, imgext = 
os.path.splitext(name) shape= (1,1) if prevent_load_errors: shape", "import os import pandas as pd import units.unit as unit import time from", "img_name + extension) if filename: meta_data_list.append(entry+\"/\"+filename) shape = (1,1) neww = [path, path+\"/\"+name,", "#print(neww) count += 1 row_list.append(neww) #df.append(row_df, ignore_index=True) #print(df) #df.reset_index(drop=True) #df.loc[len(df.index)]= neww #except Exception", "njit, jit @jit(cache=False, forceobj = True) def getMetaData(append_by_name_meta_data, meta_data_extensions): out = [] for", "context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name = path+\"-\"+name #if fix_bmp: #resave images if there is a", "for name in os.listdir(path): if os.path.isfile(os.path.join(path, name)): if count % 100 == 0:", "that won't load on some libraries but with imageio # imageio.imwrite(savepath, img) meta_data_list", "not name.startswith(path): #print(\"preprend\") # os.rename(context.origin+\"/\"+path+\"/\"+name, context.origin+\"/\"+path+\"/\"+path+\"-\"+name) # savepath = context.origin+\"/\"+path+\"/\"+path+\"-\"+name # name =", "if count % 100 == 0: printTime(count, now) #i+=1 #if i > 5:" ]
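Since loadDataframe returns plain rows rather than a DataFrame, building one is a single call. The column names below are inferred from the row layout [path, full path, name, area, width, height, metadata] and are not names from the source:

# Hypothetical wrapper around loadDataframe; arguments are placeholder values.
rows = loadDataframe(image_extensions=['.png', '.jpg'],
                     prevent_load_errors=False,
                     folders=['./data/cats', './data/dogs'],
                     append_by_name_meta_data=[],
                     meta_data_extensions=[],
                     load_errors=[])
df = pd.DataFrame(rows, columns=['folder', 'path', 'name',
                                 'pixels', 'width', 'height', 'metadata'])
print(df.head())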
[ "+ num_img_tr * epoch) if i == visualization_index: vis_img, vis_tgt, vis_out = image,", "from utils.summary import TensorboardSummary from utils.loss import SegmentationLosses from utils.calculate_weights import calculate_weights_labels from", "Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch)", "i == visualization_index: vis_img, vis_tgt, vis_out = image, target, output loss = self.criterion(output,", "shuffle=True, num_workers=args.workers) self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False,", "print('Using Adam') self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise NotImplementedError self.lr_scheduler = None if", "print(\"Test: \" if test else \"Validation:\") print('[Epoch: %d, numImages: %5d]' % (epoch, i", "in enumerate(tbar): image, target = sample['image'], sample['label'] image, target = image.cuda(), target.cuda() self.optimizer.zero_grad()", "self.optimizer.zero_grad() output = self.model(image) loss = self.criterion(output, target) loss.backward() self.optimizer.step() train_loss += loss.item()", "class Trainer: def __init__(self, args, model, train_set, val_set, test_set, class_weights, saver): self.args =", "\"Validation:\") print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print(\"Acc:{},", "= self.model(image) if i == visualization_index: vis_img, vis_tgt, vis_out = image, target, output", "saver): self.args = args self.saver = saver self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True,", "num_img_tr = len(self.train_dataloader) tbar = tqdm(self.train_dataloader, desc='\\r') visualization_index = int(random.random() * len(self.train_dataloader)) vis_img,", "Acc_class, mIoU, mIoU_20, FWIoU)) print('Loss: %.3f' % test_loss) if not test: new_pred =", "self.args = args self.saver = saver self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)", "FWIoU#, ret_list def load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer']) print(f'=> loaded checkpoint -", "new_pred self.saver.save_checkpoint({ 'epoch': epoch + 1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred, })", "args.lr * 10}] if args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32)) else: weight = None if", "loss: %.3f' % (train_loss / (i + 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr", "%.3f' % test_loss) if not test: new_pred = mIoU if new_pred > self.best_pred:", "loss: %.3f' % (test_loss / (i + 1))) pred = torch.argmax(output, dim=1).data.cpu().numpy() target", "Trainer: def __init__(self, args, model, train_set, val_set, test_set, class_weights, saver): self.args = args", "target = sample['image'], sample['label'] image, target = image.cuda(), target.cuda() with torch.no_grad(): output =", "self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print(\"Test: \" if test else 
\"Validation:\") print('[Epoch:", "self.dataset_size['train'], epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print('[Epoch: %d, numImages:", "else \"Validation:\") print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))", "utils.calculate_weights import calculate_weights_labels from torch.utils.data import DataLoader import numpy as np from utils.metrics", "self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary", "numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{},", "test: self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc',", "TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer = self.val_summary.create_summary() self.model = model self.dataset_size = {'train': len(train_set), 'val':", "get_learning_rate(self.optimizer), epoch) for i, sample in enumerate(tbar): image, target = sample['image'], sample['label'] image,", "image, target = sample['image'], sample['label'] image, target = image.cuda(), target.cuda() self.optimizer.zero_grad() output =", "def training(self, epoch): train_loss = 0.0 self.model.train() num_img_tr = len(self.train_dataloader) tbar = tqdm(self.train_dataloader,", "DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir,", "= image.cuda(), target.cuda() with torch.no_grad(): output = self.model(image) if i == visualization_index: vis_img,", "self.args.batch_size + image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU))", "train_set, val_set, test_set, class_weights, saver): self.args = args self.saver = saver self.saver.save_experiment_config() self.train_dataloader", "% train_loss) print('BestPred: %.3f' % self.best_pred) def validation(self, epoch, test=False): self.model.eval() self.evaluator.reset() ret_list", "image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU)) print('Loss: %.3f'", "torch import constants from utils.misc import get_learning_rate from utils.summary import TensorboardSummary from utils.loss", "* len(self.train_dataloader)) vis_img, vis_tgt, vis_out = None, None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for", "output = self.model(image) if i == visualization_index: vis_img, vis_tgt, vis_out = image, target,", "+ 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch) if i == visualization_index:", "== 'step': print('Using step lr scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in", "def load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict']) 
self.optimizer.load_state_dict(checkpoint['optimizer']) print(f'=> loaded checkpoint - epoch {checkpoint[\"epoch\"]})')", "= int(random.random() * len(self.train_dataloader)) vis_img, vis_tgt, vis_out = None, None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer),", "args.optimizer == 'SGD': print('Using SGD') self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer", "sample['label'] image, target = image.cuda(), target.cuda() self.optimizer.zero_grad() output = self.model(image) loss = self.criterion(output,", "DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer = self.train_summary.create_summary() self.val_summary =", "dim=1).data.cpu().numpy() target = target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc = self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU", "args self.saver = saver self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) self.val_dataloader =", "(i + 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch) if i ==", "self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class,", "= Evaluator(train_set.num_classes) self.best_pred = 0.0 def training(self, epoch): train_loss = 0.0 self.model.train() num_img_tr", "num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer = self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer", "desc='\\r') else: tbar = tqdm(self.val_dataloader, desc='\\r') test_loss = 0.0 visualization_index = int(random.random() *", "return test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list def load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint()", "= len(self.train_dataloader) tbar = tqdm(self.train_dataloader, desc='\\r') visualization_index = int(random.random() * len(self.train_dataloader)) vis_img, vis_tgt,", "None for i, sample in enumerate(tbar): image, target = sample['image'], sample['label'] image, target", "'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred, }) return test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list", "from utils.loss import SegmentationLosses from utils.calculate_weights import calculate_weights_labels from torch.utils.data import DataLoader import", "self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc,", "test else \"Validation:\") print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size +", "= None if args.use_lr_scheduler: if args.lr_scheduler == 'step': print('Using step lr scheduler') self.lr_scheduler", "model self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)} train_params = [{'params': model.get_1x_lr_params(),", "Evaluator from tqdm import tqdm import random 
class Trainer: def __init__(self, args, model,", "image, target = image.cuda(), target.cuda() with torch.no_grad(): output = self.model(image) if i ==", "vis_img, vis_tgt, vis_out, epoch) print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size", "%5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print('Loss: %.3f' % train_loss) print('BestPred:", "train_loss) print('BestPred: %.3f' % self.best_pred) def validation(self, epoch, test=False): self.model.eval() self.evaluator.reset() ret_list =", "/ self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class',", "self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred, }) return test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#,", "= self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU = self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU =", "target, output loss = self.criterion(output, target) test_loss += loss.item() tbar.set_description('Test loss: %.3f' %", "print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print(\"Acc:{}, Acc_class:{},", "epoch) print(\"Test: \" if test else \"Validation:\") print('[Epoch: %d, numImages: %5d]' % (epoch,", "as np from utils.metrics import Evaluator from tqdm import tqdm import random class", "self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test: self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20',", "raise NotImplementedError self.lr_scheduler = None if args.use_lr_scheduler: if args.lr_scheduler == 'step': print('Using step", "= {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)} train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},", "== 'Adam': print('Using Adam') self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise NotImplementedError self.lr_scheduler =", "target) test_loss += loss.item() tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))", "'test': len(test_set)} train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr}, {'params': model.get_10x_lr_params(), 'lr': args.lr *", "TensorboardSummary from utils.loss import SegmentationLosses from utils.calculate_weights import calculate_weights_labels from torch.utils.data import DataLoader", "vis_tgt, vis_out = None, None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for i, sample in", "vis_tgt, vis_out = image, target, output self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch) if constants.VISUALIZATION:", "%5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU:", "sample['label'] image, target = image.cuda(), target.cuda() with torch.no_grad(): output = self.model(image) if i", "= self.val_summary.create_summary() self.model = model self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)}", "= self.evaluator.Pixel_Accuracy_Class() mIoU = self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU = 
self.evaluator.Frequency_Weighted_Intersection_over_Union() if not", "test=False): self.model.eval() self.evaluator.reset() ret_list = [] if test: tbar = tqdm(self.test_dataloader, desc='\\r') else:", "mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test: self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'],", "loss = self.criterion(output, target) test_loss += loss.item() tbar.set_description('Test loss: %.3f' % (test_loss /", "epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print(\"Test:", "if i == visualization_index: vis_img, vis_tgt, vis_out = image, target, output self.train_writer.add_scalar('total_loss_epoch', train_loss", "1))) pred = torch.argmax(output, dim=1).data.cpu().numpy() target = target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc = self.evaluator.Pixel_Accuracy()", "output loss = self.criterion(output, target) test_loss += loss.item() tbar.set_description('Test loss: %.3f' % (test_loss", "np from utils.metrics import Evaluator from tqdm import tqdm import random class Trainer:", "utils.summary import TensorboardSummary from utils.loss import SegmentationLosses from utils.calculate_weights import calculate_weights_labels from torch.utils.data", "from utils.calculate_weights import calculate_weights_labels from torch.utils.data import DataLoader import numpy as np from", "import calculate_weights_labels from torch.utils.data import DataLoader import numpy as np from utils.metrics import", "if args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32)) else: weight = None if args.optimizer == 'SGD':", "model, train_set, val_set, test_set, class_weights, saver): self.args = args self.saver = saver self.saver.save_experiment_config()", "shuffle=False, num_workers=args.workers) self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer", "SGD') self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer == 'Adam': print('Using Adam')", "vis_out = image, target, output loss = self.criterion(output, target) test_loss += loss.item() tbar.set_description('Test", "sample['image'], sample['label'] image, target = image.cuda(), target.cuda() with torch.no_grad(): output = self.model(image) if", "self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer = self.val_summary.create_summary() self.model = model self.dataset_size =", "from utils.misc import get_learning_rate from utils.summary import TensorboardSummary from utils.loss import SegmentationLosses from", "self.best_pred) def validation(self, epoch, test=False): self.model.eval() self.evaluator.reset() ret_list = [] if test: tbar", "len(self.train_dataloader)) vis_img, vis_tgt, vis_out = None, None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for i,", "= TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer = self.val_summary.create_summary() self.model = model self.dataset_size = {'train': len(train_set),", "+= 
loss.item() tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1))) pred =", "'epoch': epoch + 1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred, }) return test_loss,", "self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(\",\")], gamma=0.1) self.criterion = SegmentationLosses(weight=weight, ignore_index=255,", "import os import torch import constants from utils.misc import get_learning_rate from utils.summary import", "args.step_size.split(\",\")], gamma=0.1) self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes) self.best_pred = 0.0", "print('Using step lr scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(\",\")], gamma=0.1)", "import random class Trainer: def __init__(self, args, model, train_set, val_set, test_set, class_weights, saver):", "self.criterion(output, target) test_loss += loss.item() tbar.set_description('Test loss: %.3f' % (test_loss / (i +", "args.lr_scheduler == 'step': print('Using step lr scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x", "= SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes) self.best_pred = 0.0 def training(self, epoch):", "= sample['image'], sample['label'] image, target = image.cuda(), target.cuda() self.optimizer.zero_grad() output = self.model(image) loss", "ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes) self.best_pred = 0.0 def training(self, epoch): train_loss =", "self.evaluator = Evaluator(train_set.num_classes) self.best_pred = 0.0 def training(self, epoch): train_loss = 0.0 self.model.train()", "vis_out = None, None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for i, sample in enumerate(tbar):", "vis_out, epoch) print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))", "import tqdm import random class Trainer: def __init__(self, args, model, train_set, val_set, test_set,", "vis_tgt, vis_out = None, None, None for i, sample in enumerate(tbar): image, target", "= tqdm(self.test_dataloader, desc='\\r') else: tbar = tqdm(self.val_dataloader, desc='\\r') test_loss = 0.0 visualization_index =", "self.lr_scheduler = None if args.use_lr_scheduler: if args.lr_scheduler == 'step': print('Using step lr scheduler')", "= self.evaluator.Mean_Intersection_over_Union_20() FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test: self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch)", "self.best_pred = 0.0 def training(self, epoch): train_loss = 0.0 self.model.train() num_img_tr = len(self.train_dataloader)", "model.get_1x_lr_params(), 'lr': args.lr}, {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}] if args.use_balanced_weights: weight =", "TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer = self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer = self.val_summary.create_summary() self.model", "mIoU, mIoU_20, FWIoU)) 
print('Loss: %.3f' % test_loss) if not test: new_pred = mIoU", "image.cuda(), target.cuda() self.optimizer.zero_grad() output = self.model(image) loss = self.criterion(output, target) loss.backward() self.optimizer.step() train_loss", "DataLoader import numpy as np from utils.metrics import Evaluator from tqdm import tqdm", "'Adam': print('Using Adam') self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise NotImplementedError self.lr_scheduler = None", "print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print('Loss: %.3f'", "* 10}] if args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32)) else: weight = None if args.optimizer", "= None, None, None for i, sample in enumerate(tbar): image, target = sample['image'],", "epoch) print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print('Loss:", "epoch) self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer,", "def validation(self, epoch, test=False): self.model.eval() self.evaluator.reset() ret_list = [] if test: tbar =", "args.use_lr_scheduler: if args.lr_scheduler == 'step': print('Using step lr scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x)", "self.train_writer = self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer = self.val_summary.create_summary() self.model = model", "= target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc = self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU = self.evaluator.Mean_Intersection_over_Union()", "self.val_summary.create_summary() self.model = model self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)} train_params", "output self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out,", "10}] if args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32)) else: weight = None if args.optimizer ==", "image, target, output loss = self.criterion(output, target) test_loss += loss.item() tbar.set_description('Test loss: %.3f'", "tbar = tqdm(self.train_dataloader, desc='\\r') visualization_index = int(random.random() * len(self.train_dataloader)) vis_img, vis_tgt, vis_out =", "self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer == 'Adam': print('Using Adam') self.optimizer", "for x in args.step_size.split(\",\")], gamma=0.1) self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes)", "utils.loss import SegmentationLosses from utils.calculate_weights import calculate_weights_labels from torch.utils.data import DataLoader import numpy", "= args self.saver = saver self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) self.val_dataloader", "tbar.set_description('Train loss: %.3f' % 
(train_loss / (i + 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i +", "enumerate(tbar): image, target = sample['image'], sample['label'] image, target = image.cuda(), target.cuda() with torch.no_grad():", "self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print(\"Test: \"", "= TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer = self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer = self.val_summary.create_summary()", "= self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer = self.val_summary.create_summary() self.model = model self.dataset_size", "output = self.model(image) loss = self.criterion(output, target) loss.backward() self.optimizer.step() train_loss += loss.item() tbar.set_description('Train", "vis_tgt, vis_out = image, target, output loss = self.criterion(output, target) test_loss += loss.item()", "DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader = DataLoader(test_set,", "== visualization_index: vis_img, vis_tgt, vis_out = image, target, output loss = self.criterion(output, target)", "enumerate(tbar): image, target = sample['image'], sample['label'] image, target = image.cuda(), target.cuda() self.optimizer.zero_grad() output", "0.0 visualization_index = int(random.random() * len(self.val_dataloader)) vis_img, vis_tgt, vis_out = None, None, None", "args, model, train_set, val_set, test_set, class_weights, saver): self.args = args self.saver = saver", "Acc, Acc_class, FWIoU#, ret_list def load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer']) print(f'=> loaded", "* len(self.val_dataloader)) vis_img, vis_tgt, vis_out = None, None, None for i, sample in", "batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\"))", "SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes) self.best_pred = 0.0 def training(self, epoch): train_loss", "= self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test: self.val_writer.add_scalar('total_loss_epoch', test_loss", "+= loss.item() tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(),", "i * self.args.batch_size + image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU,", "weight = torch.from_numpy(class_weights.astype(np.float32)) else: weight = None if args.optimizer == 'SGD': print('Using SGD')", "self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU = 
self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()", "if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print(\"Test: \" if test else", "}) return test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list def load_best_checkpoint(self): checkpoint =", "self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise NotImplementedError self.lr_scheduler = None if args.use_lr_scheduler: if", "constants from utils.misc import get_learning_rate from utils.summary import TensorboardSummary from utils.loss import SegmentationLosses", "mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU,", "if args.lr_scheduler == 'step': print('Using step lr scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for", "from utils.metrics import Evaluator from tqdm import tqdm import random class Trainer: def", "calculate_weights_labels from torch.utils.data import DataLoader import numpy as np from utils.metrics import Evaluator", "momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer == 'Adam': print('Using Adam') self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay)", "> self.best_pred: self.best_pred = new_pred self.saver.save_checkpoint({ 'epoch': epoch + 1, 'state_dict': self.model.state_dict(), 'optimizer':", "train_loss = 0.0 self.model.train() num_img_tr = len(self.train_dataloader) tbar = tqdm(self.train_dataloader, desc='\\r') visualization_index =", "nesterov=args.nesterov) elif args.optimizer == 'Adam': print('Using Adam') self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise", "lr scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(\",\")], gamma=0.1) self.criterion =", "scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(\",\")], gamma=0.1) self.criterion = SegmentationLosses(weight=weight,", "+ 1))) pred = torch.argmax(output, dim=1).data.cpu().numpy() target = target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc =", "mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if constants.VISUALIZATION:", "__init__(self, args, model, train_set, val_set, test_set, class_weights, saver): self.args = args self.saver =", "if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print('[Epoch: %d, numImages: %5d]' %", "num_img_tr * epoch) if i == visualization_index: vis_img, vis_tgt, vis_out = image, target,", "int(random.random() * len(self.val_dataloader)) vis_img, vis_tgt, vis_out = None, None, None for i, sample", "'lr': args.lr * 10}] if args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32)) else: weight = None", "Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU)) print('Loss: %.3f' % test_loss)", "int(random.random() * 
len(self.train_dataloader)) vis_img, vis_tgt, vis_out = None, None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch)", "weight = None if args.optimizer == 'SGD': print('Using SGD') self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum,", "self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch) if i == visualization_index: vis_img, vis_tgt,", "test: new_pred = mIoU if new_pred > self.best_pred: self.best_pred = new_pred self.saver.save_checkpoint({ 'epoch':", "len(self.train_dataloader) tbar = tqdm(self.train_dataloader, desc='\\r') visualization_index = int(random.random() * len(self.train_dataloader)) vis_img, vis_tgt, vis_out", "self.evaluator.reset() ret_list = [] if test: tbar = tqdm(self.test_dataloader, desc='\\r') else: tbar =", "test_loss) if not test: new_pred = mIoU if new_pred > self.best_pred: self.best_pred =", "desc='\\r') test_loss = 0.0 visualization_index = int(random.random() * len(self.val_dataloader)) vis_img, vis_tgt, vis_out =", "(i + 1))) pred = torch.argmax(output, dim=1).data.cpu().numpy() target = target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc", "= tqdm(self.train_dataloader, desc='\\r') visualization_index = int(random.random() * len(self.train_dataloader)) vis_img, vis_tgt, vis_out = None,", "= None, None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for i, sample in enumerate(tbar): image,", "self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)} train_params = [{'params': model.get_1x_lr_params(), 'lr':", "len(test_set)} train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr}, {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]", "numpy as np from utils.metrics import Evaluator from tqdm import tqdm import random", "vis_img, vis_tgt, vis_out = None, None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for i, sample", "= image, target, output self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset,", "i + num_img_tr * epoch) if i == visualization_index: vis_img, vis_tgt, vis_out =", "test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list def load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict'])", "(epoch, i * self.args.batch_size + image.data.shape[0])) print('Loss: %.3f' % train_loss) print('BestPred: %.3f' %", "epoch, test=False): self.model.eval() self.evaluator.reset() ret_list = [] if test: tbar = tqdm(self.test_dataloader, desc='\\r')", "numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print('Loss: %.3f' % train_loss)", "%d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{},", "saver self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False,", "mIoU = self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test: self.val_writer.add_scalar('total_loss_epoch',", "batch_size=args.batch_size, shuffle=True, 
num_workers=args.workers) self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size,", "i, sample in enumerate(tbar): image, target = sample['image'], sample['label'] image, target = image.cuda(),", "test: tbar = tqdm(self.test_dataloader, desc='\\r') else: tbar = tqdm(self.val_dataloader, desc='\\r') test_loss = 0.0", "visualization_index: vis_img, vis_tgt, vis_out = image, target, output self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch)", "import DataLoader import numpy as np from utils.metrics import Evaluator from tqdm import", "constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print('[Epoch: %d, numImages: %5d]' % (epoch,", "num_workers=args.workers) self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)", "self.model.train() num_img_tr = len(self.train_dataloader) tbar = tqdm(self.train_dataloader, desc='\\r') visualization_index = int(random.random() * len(self.train_dataloader))", "% self.best_pred) def validation(self, epoch, test=False): self.model.eval() self.evaluator.reset() ret_list = [] if test:", "SegmentationLosses from utils.calculate_weights import calculate_weights_labels from torch.utils.data import DataLoader import numpy as np", "target.cuda() with torch.no_grad(): output = self.model(image) if i == visualization_index: vis_img, vis_tgt, vis_out", "+ image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU)) print('Loss:", "shuffle=False, num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer = self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\"))", "{'train': len(train_set), 'val': len(val_set), 'test': len(test_set)} train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr}, {'params':", "self.model(image) if i == visualization_index: vis_img, vis_tgt, vis_out = image, target, output loss", "(train_loss / (i + 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch) if", "self.evaluator.add_batch(target, pred) Acc = self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU = self.evaluator.Mean_Intersection_over_Union() mIoU_20 =", "self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print('[Epoch: %d, numImages: %5d]' % (epoch, i", "weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer == 'Adam': print('Using Adam') self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay) else:", "self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer = self.train_summary.create_summary()", "args.optimizer == 'Adam': print('Using Adam') self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise NotImplementedError self.lr_scheduler", "= sample['image'], sample['label'] image, 
target = image.cuda(), target.cuda() with torch.no_grad(): output = self.model(image)", "= image, target, output loss = self.criterion(output, target) test_loss += loss.item() tbar.set_description('Test loss:", "target = image.cuda(), target.cuda() self.optimizer.zero_grad() output = self.model(image) loss = self.criterion(output, target) loss.backward()", "self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for i, sample in enumerate(tbar): image, target = sample['image'], sample['label']", "with torch.no_grad(): output = self.model(image) if i == visualization_index: vis_img, vis_tgt, vis_out =", "= DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer = self.train_summary.create_summary() self.val_summary", "import numpy as np from utils.metrics import Evaluator from tqdm import tqdm import", "epoch) for i, sample in enumerate(tbar): image, target = sample['image'], sample['label'] image, target", "image, target = image.cuda(), target.cuda() self.optimizer.zero_grad() output = self.model(image) loss = self.criterion(output, target)", "print('BestPred: %.3f' % self.best_pred) def validation(self, epoch, test=False): self.model.eval() self.evaluator.reset() ret_list = []", "self.best_pred, }) return test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list def load_best_checkpoint(self): checkpoint", "pred = torch.argmax(output, dim=1).data.cpu().numpy() target = target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc = self.evaluator.Pixel_Accuracy() Acc_class", "utils.metrics import Evaluator from tqdm import tqdm import random class Trainer: def __init__(self,", "= torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer == 'Adam': print('Using Adam') self.optimizer =", "= 0.0 def training(self, epoch): train_loss = 0.0 self.model.train() num_img_tr = len(self.train_dataloader) tbar", "self.saver = saver self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) self.val_dataloader = DataLoader(val_set,", "batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer = self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir,", "import Evaluator from tqdm import tqdm import random class Trainer: def __init__(self, args,", "1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch) if i == visualization_index: vis_img,", "epoch) self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch)", "self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print('[Epoch: %d, numImages: %5d]' % (epoch, i *", "* self.args.batch_size + image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, mIoU_20,", "print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU)) print('Loss: %.3f' %", "torch.from_numpy(class_weights.astype(np.float32)) else: weight = None if args.optimizer == 'SGD': print('Using SGD') 
self.optimizer =", "image, target, output self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img,", "train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr}, {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}] if", "test_loss += loss.item() tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1))) pred", "self.evaluator.Pixel_Accuracy_Class() mIoU = self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test:", "Acc_class, FWIoU#, ret_list def load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer']) print(f'=> loaded checkpoint", "= self.model(image) loss = self.criterion(output, target) loss.backward() self.optimizer.step() train_loss += loss.item() tbar.set_description('Train loss:", "visualization_index: vis_img, vis_tgt, vis_out = image, target, output loss = self.criterion(output, target) test_loss", "if not test: self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20,", "print('Loss: %.3f' % test_loss) if not test: new_pred = mIoU if new_pred >", "self.optimizer.step() train_loss += loss.item() tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))", "print('Using SGD') self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer == 'Adam': print('Using", "if test: tbar = tqdm(self.test_dataloader, desc='\\r') else: tbar = tqdm(self.val_dataloader, desc='\\r') test_loss =", "\"validation\")) self.val_writer = self.val_summary.create_summary() self.model = model self.dataset_size = {'train': len(train_set), 'val': len(val_set),", "load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer']) print(f'=> loaded checkpoint - epoch {checkpoint[\"epoch\"]})') return", "self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes) self.best_pred = 0.0 def training(self,", "= self.criterion(output, target) loss.backward() self.optimizer.step() train_loss += loss.item() tbar.set_description('Train loss: %.3f' % (train_loss", "image.cuda(), target.cuda() with torch.no_grad(): output = self.model(image) if i == visualization_index: vis_img, vis_tgt,", "new_pred > self.best_pred: self.best_pred = new_pred self.saver.save_checkpoint({ 'epoch': epoch + 1, 'state_dict': self.model.state_dict(),", "Adam') self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise NotImplementedError self.lr_scheduler = None if args.use_lr_scheduler:", "self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch)", "= mIoU if new_pred > self.best_pred: self.best_pred = new_pred self.saver.save_checkpoint({ 'epoch': epoch +", "self.best_pred: self.best_pred = 
new_pred self.saver.save_checkpoint({ 'epoch': epoch + 1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(),", "image, target = sample['image'], sample['label'] image, target = image.cuda(), target.cuda() with torch.no_grad(): output", "def __init__(self, args, model, train_set, val_set, test_set, class_weights, saver): self.args = args self.saver", "mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU)) print('Loss: %.3f' % test_loss) if", "loss.item(), i + num_img_tr * epoch) if i == visualization_index: vis_img, vis_tgt, vis_out", "target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc = self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU = self.evaluator.Mean_Intersection_over_Union() mIoU_20", "self.evaluator.Mean_Intersection_over_Union_20() FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test: self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU',", "Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU = self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union() if", "'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred, }) return test_loss, mIoU, mIoU_20, Acc, Acc_class,", "epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print('[Epoch: %d, numImages: %5d]'", "* epoch) if i == visualization_index: vis_img, vis_tgt, vis_out = image, target, output", "self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer = self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer =", "None, None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for i, sample in enumerate(tbar): image, target", "self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print(\"Test: \" if test else \"Validation:\") print('[Epoch: %d,", "not test: new_pred = mIoU if new_pred > self.best_pred: self.best_pred = new_pred self.saver.save_checkpoint({", "test_set, class_weights, saver): self.args = args self.saver = saver self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set,", "% (train_loss / (i + 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch)", "== visualization_index: vis_img, vis_tgt, vis_out = image, target, output self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'],", "torch.no_grad(): output = self.model(image) if i == visualization_index: vis_img, vis_tgt, vis_out = image,", "self.saver.save_checkpoint({ 'epoch': epoch + 1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred, }) return", "'val': len(val_set), 'test': len(test_set)} train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr}, {'params': model.get_10x_lr_params(), 'lr':", "vis_tgt, vis_out, epoch) print(\"Test: \" if test else \"Validation:\") print('[Epoch: %d, numImages: %5d]'", "elif args.optimizer == 'Adam': print('Using Adam') self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise NotImplementedError", 
"self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)", "NotImplementedError self.lr_scheduler = None if args.use_lr_scheduler: if args.lr_scheduler == 'step': print('Using step lr", "None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for i, sample in enumerate(tbar): image, target = sample['image'],", "mIoU if new_pred > self.best_pred: self.best_pred = new_pred self.saver.save_checkpoint({ 'epoch': epoch + 1,", "= model self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)} train_params = [{'params':", "target.cuda() self.optimizer.zero_grad() output = self.model(image) loss = self.criterion(output, target) loss.backward() self.optimizer.step() train_loss +=", "= self.criterion(output, target) test_loss += loss.item() tbar.set_description('Test loss: %.3f' % (test_loss / (i", "% (epoch, i * self.args.batch_size + image.data.shape[0])) print('Loss: %.3f' % train_loss) print('BestPred: %.3f'", "new_pred = mIoU if new_pred > self.best_pred: self.best_pred = new_pred self.saver.save_checkpoint({ 'epoch': epoch", "sample['image'], sample['label'] image, target = image.cuda(), target.cuda() self.optimizer.zero_grad() output = self.model(image) loss =", "constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print(\"Test: \" if test else \"Validation:\")", "tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1))) pred = torch.argmax(output, dim=1).data.cpu().numpy()", "import constants from utils.misc import get_learning_rate from utils.summary import TensorboardSummary from utils.loss import", "get_learning_rate from utils.summary import TensorboardSummary from utils.loss import SegmentationLosses from utils.calculate_weights import calculate_weights_labels", "0.0 def training(self, epoch): train_loss = 0.0 self.model.train() num_img_tr = len(self.train_dataloader) tbar =", "milestones=[int(x) for x in args.step_size.split(\",\")], gamma=0.1) self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator =", "loss.item() tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i", "= None if args.optimizer == 'SGD': print('Using SGD') self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay,", "= tqdm(self.val_dataloader, desc='\\r') test_loss = 0.0 visualization_index = int(random.random() * len(self.val_dataloader)) vis_img, vis_tgt,", "val_set, test_set, class_weights, saver): self.args = args self.saver = saver self.saver.save_experiment_config() self.train_dataloader =", "if args.optimizer == 'SGD': print('Using SGD') self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif", "= self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test: self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU', mIoU, epoch)", "os import torch import constants from utils.misc import get_learning_rate from utils.summary import TensorboardSummary", "args.lr}, {'params': model.get_10x_lr_params(), 'lr': args.lr * 
10}] if args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32)) else:", "else: raise NotImplementedError self.lr_scheduler = None if args.use_lr_scheduler: if args.lr_scheduler == 'step': print('Using", "tqdm(self.train_dataloader, desc='\\r') visualization_index = int(random.random() * len(self.train_dataloader)) vis_img, vis_tgt, vis_out = None, None,", "%d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0])) print('Loss: %.3f' %", "= torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise NotImplementedError self.lr_scheduler = None if args.use_lr_scheduler: if args.lr_scheduler", "'lr': args.lr}, {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}] if args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32))", "i * self.args.batch_size + image.data.shape[0])) print('Loss: %.3f' % train_loss) print('BestPred: %.3f' % self.best_pred)", "% test_loss) if not test: new_pred = mIoU if new_pred > self.best_pred: self.best_pred", "else: tbar = tqdm(self.val_dataloader, desc='\\r') test_loss = 0.0 visualization_index = int(random.random() * len(self.val_dataloader))", "vis_out = image, target, output self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer,", "len(val_set), 'test': len(test_set)} train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr}, {'params': model.get_10x_lr_params(), 'lr': args.lr", "vis_out = None, None, None for i, sample in enumerate(tbar): image, target =", "= torch.from_numpy(class_weights.astype(np.float32)) else: weight = None if args.optimizer == 'SGD': print('Using SGD') self.optimizer", "len(train_set), 'val': len(val_set), 'test': len(test_set)} train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr}, {'params': model.get_10x_lr_params(),", "import get_learning_rate from utils.summary import TensorboardSummary from utils.loss import SegmentationLosses from utils.calculate_weights import", "i == visualization_index: vis_img, vis_tgt, vis_out = image, target, output self.train_writer.add_scalar('total_loss_epoch', train_loss /", "in enumerate(tbar): image, target = sample['image'], sample['label'] image, target = image.cuda(), target.cuda() with", "epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch)", "vis_img, vis_tgt, vis_out = image, target, output loss = self.criterion(output, target) test_loss +=", "'SGD': print('Using SGD') self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer == 'Adam':", "mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list def load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer'])", "%.3f' % train_loss) print('BestPred: %.3f' % self.best_pred) def validation(self, epoch, test=False): self.model.eval() self.evaluator.reset()", "if i == visualization_index: vis_img, vis_tgt, vis_out = image, target, output loss =", "pred) Acc = self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU = self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20()", 
"torch.optim.Adam(train_params, weight_decay=args.weight_decay) else: raise NotImplementedError self.lr_scheduler = None if args.use_lr_scheduler: if args.lr_scheduler ==", "FWIoU)) print('Loss: %.3f' % test_loss) if not test: new_pred = mIoU if new_pred", "%.3f' % (train_loss / (i + 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr *", "self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if", "[] if test: tbar = tqdm(self.test_dataloader, desc='\\r') else: tbar = tqdm(self.val_dataloader, desc='\\r') test_loss", "%.3f' % self.best_pred) def validation(self, epoch, test=False): self.model.eval() self.evaluator.reset() ret_list = [] if", "weight_decay=args.weight_decay) else: raise NotImplementedError self.lr_scheduler = None if args.use_lr_scheduler: if args.lr_scheduler == 'step':", "%.3f' % (test_loss / (i + 1))) pred = torch.argmax(output, dim=1).data.cpu().numpy() target =", "x in args.step_size.split(\",\")], gamma=0.1) self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes) self.best_pred", "num_workers=args.workers) self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"train\")) self.train_writer =", "self.model(image) loss = self.criterion(output, target) loss.backward() self.optimizer.step() train_loss += loss.item() tbar.set_description('Train loss: %.3f'", "loss.backward() self.optimizer.step() train_loss += loss.item() tbar.set_description('Train loss: %.3f' % (train_loss / (i +", "sample in enumerate(tbar): image, target = sample['image'], sample['label'] image, target = image.cuda(), target.cuda()", "tbar = tqdm(self.val_dataloader, desc='\\r') test_loss = 0.0 visualization_index = int(random.random() * len(self.val_dataloader)) vis_img,", "import torch import constants from utils.misc import get_learning_rate from utils.summary import TensorboardSummary from", "vis_img, vis_tgt, vis_out = None, None, None for i, sample in enumerate(tbar): image,", "cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes) self.best_pred = 0.0 def training(self, epoch): train_loss = 0.0", "= [{'params': model.get_1x_lr_params(), 'lr': args.lr}, {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}] if args.use_balanced_weights:", "None if args.use_lr_scheduler: if args.lr_scheduler == 'step': print('Using step lr scheduler') self.lr_scheduler =", "tqdm(self.test_dataloader, desc='\\r') else: tbar = tqdm(self.val_dataloader, desc='\\r') test_loss = 0.0 visualization_index = int(random.random()", "for i, sample in enumerate(tbar): image, target = sample['image'], sample['label'] image, target =", "self.model.eval() self.evaluator.reset() ret_list = [] if test: tbar = tqdm(self.test_dataloader, desc='\\r') else: tbar", "= 0.0 visualization_index = int(random.random() * len(self.val_dataloader)) vis_img, vis_tgt, vis_out = None, None,", "\" if test else \"Validation:\") print('[Epoch: %d, numImages: %5d]' % (epoch, i *", "vis_out, epoch) print(\"Test: \" if test else \"Validation:\") print('[Epoch: %d, numImages: %5d]' %", "/ (i + 1))) pred = torch.argmax(output, dim=1).data.cpu().numpy() target = target.cpu().numpy() 
self.evaluator.add_batch(target, pred)", "random class Trainer: def __init__(self, args, model, train_set, val_set, test_set, class_weights, saver): self.args", "epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print(\"Test: \" if test", "tbar = tqdm(self.test_dataloader, desc='\\r') else: tbar = tqdm(self.val_dataloader, desc='\\r') test_loss = 0.0 visualization_index", "self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out,", "= int(random.random() * len(self.val_dataloader)) vis_img, vis_tgt, vis_out = None, None, None for i,", "self.best_pred = new_pred self.saver.save_checkpoint({ 'epoch': epoch + 1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred':", "FWIoU, epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print(\"Test: \" if", "if not test: new_pred = mIoU if new_pred > self.best_pred: self.best_pred = new_pred", "from torch.utils.data import DataLoader import numpy as np from utils.metrics import Evaluator from", "= torch.argmax(output, dim=1).data.cpu().numpy() target = target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc = self.evaluator.Pixel_Accuracy() Acc_class =", "self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset,", "if args.use_lr_scheduler: if args.lr_scheduler == 'step': print('Using step lr scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer,", "gamma=0.1) self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes) self.best_pred = 0.0 def", "tqdm import tqdm import random class Trainer: def __init__(self, args, model, train_set, val_set,", "None, None self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch) for i, sample in enumerate(tbar): image, target =", "visualization_index = int(random.random() * len(self.val_dataloader)) vis_img, vis_tgt, vis_out = None, None, None for", "checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer']) print(f'=> loaded checkpoint - epoch {checkpoint[\"epoch\"]})') return checkpoint[\"epoch\"]", "= saver self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size,", "FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test: self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU', mIoU,", "epoch + 1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred, }) return test_loss, mIoU,", "image.data.shape[0])) print('Loss: %.3f' % train_loss) print('BestPred: %.3f' % self.best_pred) def validation(self, epoch, test=False):", "validation(self, epoch, test=False): self.model.eval() 
self.evaluator.reset() ret_list = [] if test: tbar = tqdm(self.test_dataloader,", "train_loss / self.dataset_size['train'], epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print('[Epoch:", "'step': print('Using step lr scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(\",\")],", "print('Loss: %.3f' % train_loss) print('BestPred: %.3f' % self.best_pred) def validation(self, epoch, test=False): self.model.eval()", "= DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader =", "self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU',", "test_loss = 0.0 visualization_index = int(random.random() * len(self.val_dataloader)) vis_img, vis_tgt, vis_out = None,", "% (epoch, i * self.args.batch_size + image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc,", "1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred, }) return test_loss, mIoU, mIoU_20, Acc,", "model.get_10x_lr_params(), 'lr': args.lr * 10}] if args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32)) else: weight =", "torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(\",\")], gamma=0.1) self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator", "== 'SGD': print('Using SGD') self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer ==", "torch.argmax(output, dim=1).data.cpu().numpy() target = target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc = self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class()", "epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt,", "self.args.batch_size + image.data.shape[0])) print('Loss: %.3f' % train_loss) print('BestPred: %.3f' % self.best_pred) def validation(self,", "= new_pred self.saver.save_checkpoint({ 'epoch': epoch + 1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred,", "self.model = model self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)} train_params =", "utils.misc import get_learning_rate from utils.summary import TensorboardSummary from utils.loss import SegmentationLosses from utils.calculate_weights", "from tqdm import tqdm import random class Trainer: def __init__(self, args, model, train_set,", "Evaluator(train_set.num_classes) self.best_pred = 0.0 def training(self, epoch): train_loss = 0.0 self.model.train() num_img_tr =", "visualization_index = int(random.random() * len(self.train_dataloader)) vis_img, vis_tgt, vis_out = None, None, None self.train_writer.add_scalar('learning_rate',", "if test else \"Validation:\") 
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size", "tqdm import random class Trainer: def __init__(self, args, model, train_set, val_set, test_set, class_weights,", "+ 1, 'state_dict': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'best_pred': self.best_pred, }) return test_loss, mIoU, mIoU_20,", "vis_img, vis_tgt, vis_out, epoch) print(\"Test: \" if test else \"Validation:\") print('[Epoch: %d, numImages:", "epoch) if i == visualization_index: vis_img, vis_tgt, vis_out = image, target, output self.train_writer.add_scalar('total_loss_epoch',", "self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader", "mIoU_20, Acc, Acc_class, FWIoU#, ret_list def load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer']) print(f'=>", "None if args.optimizer == 'SGD': print('Using SGD') self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov)", "= DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers) self.train_summary =", "step lr scheduler') self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(\",\")], gamma=0.1) self.criterion", "target, output self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt,", "torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov) elif args.optimizer == 'Adam': print('Using Adam') self.optimizer = torch.optim.Adam(train_params,", "target = target.cpu().numpy() self.evaluator.add_batch(target, pred) Acc = self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU =", "{}\".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU)) print('Loss: %.3f' % test_loss) if not test: new_pred", "None, None for i, sample in enumerate(tbar): image, target = sample['image'], sample['label'] image,", "torch.utils.data import DataLoader import numpy as np from utils.metrics import Evaluator from tqdm", "0.0 self.model.train() num_img_tr = len(self.train_dataloader) tbar = tqdm(self.train_dataloader, desc='\\r') visualization_index = int(random.random() *", "= 0.0 self.model.train() num_img_tr = len(self.train_dataloader) tbar = tqdm(self.train_dataloader, desc='\\r') visualization_index = int(random.random()", "in args.step_size.split(\",\")], gamma=0.1) self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type) self.evaluator = Evaluator(train_set.num_classes) self.best_pred =", "'best_pred': self.best_pred, }) return test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list def load_best_checkpoint(self):", "import SegmentationLosses from utils.calculate_weights import calculate_weights_labels from torch.utils.data import DataLoader import numpy as", "not test: self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch) 
self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch)", "import TensorboardSummary from utils.loss import SegmentationLosses from utils.calculate_weights import calculate_weights_labels from torch.utils.data import", "* self.args.batch_size + image.data.shape[0])) print('Loss: %.3f' % train_loss) print('BestPred: %.3f' % self.best_pred) def", "target = image.cuda(), target.cuda() with torch.no_grad(): output = self.model(image) if i == visualization_index:", "fwIoU: {}\".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU)) print('Loss: %.3f' % test_loss) if not test:", "training(self, epoch): train_loss = 0.0 self.model.train() num_img_tr = len(self.train_dataloader) tbar = tqdm(self.train_dataloader, desc='\\r')", "else: weight = None if args.optimizer == 'SGD': print('Using SGD') self.optimizer = torch.optim.SGD(train_params,", "mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU)) print('Loss: %.3f' % test_loss) if not", "None, None, None for i, sample in enumerate(tbar): image, target = sample['image'], sample['label']", "Acc = self.evaluator.Pixel_Accuracy() Acc_class = self.evaluator.Pixel_Accuracy_Class() mIoU = self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU", "(epoch, i * self.args.batch_size + image.data.shape[0])) print(\"Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}\".format(Acc, Acc_class,", "[{'params': model.get_1x_lr_params(), 'lr': args.lr}, {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}] if args.use_balanced_weights: weight", "vis_tgt, vis_out, epoch) print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size +", "loss = self.criterion(output, target) loss.backward() self.optimizer.step() train_loss += loss.item() tbar.set_description('Train loss: %.3f' %", "self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer = self.val_summary.create_summary() self.model = model self.dataset_size = {'train':", "\"train\")) self.train_writer = self.train_summary.create_summary() self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, \"validation\")) self.val_writer = self.val_summary.create_summary() self.model =", "len(self.val_dataloader)) vis_img, vis_tgt, vis_out = None, None, None for i, sample in enumerate(tbar):", "mIoU_20, FWIoU)) print('Loss: %.3f' % test_loss) if not test: new_pred = mIoU if", "loss.item() tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1))) pred = torch.argmax(output,", "args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32)) else: weight = None if args.optimizer == 'SGD': print('Using", "target) loss.backward() self.optimizer.step() train_loss += loss.item() tbar.set_description('Train loss: %.3f' % (train_loss / (i", "/ self.dataset_size['train'], epoch) if constants.VISUALIZATION: self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch) print('[Epoch: %d,", "{'params': model.get_10x_lr_params(), 'lr': args.lr * 10}] if args.use_balanced_weights: weight = torch.from_numpy(class_weights.astype(np.float32)) else: weight", "= torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(\",\")], gamma=0.1) self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type)", "self.optimizer.state_dict(), 'best_pred': self.best_pred, }) return test_loss, 
mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list def", "self.criterion(output, target) loss.backward() self.optimizer.step() train_loss += loss.item() tbar.set_description('Train loss: %.3f' % (train_loss /", "= [] if test: tbar = tqdm(self.test_dataloader, desc='\\r') else: tbar = tqdm(self.val_dataloader, desc='\\r')", "self.evaluator.Mean_Intersection_over_Union() mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20() FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union() if not test: self.val_writer.add_scalar('total_loss_epoch', test_loss /", "% (test_loss / (i + 1))) pred = torch.argmax(output, dim=1).data.cpu().numpy() target = target.cpu().numpy()", "ret_list = [] if test: tbar = tqdm(self.test_dataloader, desc='\\r') else: tbar = tqdm(self.val_dataloader,", "self.val_writer = self.val_summary.create_summary() self.model = model self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test':", "train_loss += loss.item() tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1))) self.train_writer.add_scalar('total_loss_iter',", "if new_pred > self.best_pred: self.best_pred = new_pred self.saver.save_checkpoint({ 'epoch': epoch + 1, 'state_dict':", "Acc, epoch) self.val_writer.add_scalar('Acc_class', Acc_class, epoch) self.val_writer.add_scalar('fwIoU', FWIoU, epoch) if constants.VISUALIZATION: self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img,", "epoch): train_loss = 0.0 self.model.train() num_img_tr = len(self.train_dataloader) tbar = tqdm(self.train_dataloader, desc='\\r') visualization_index", "= image.cuda(), target.cuda() self.optimizer.zero_grad() output = self.model(image) loss = self.criterion(output, target) loss.backward() self.optimizer.step()", "test_loss / self.dataset_size['val'], epoch) self.val_writer.add_scalar('mIoU', mIoU, epoch) self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch) self.val_writer.add_scalar('Acc', Acc, epoch)", "(test_loss / (i + 1))) pred = torch.argmax(output, dim=1).data.cpu().numpy() target = target.cpu().numpy() self.evaluator.add_batch(target,", "desc='\\r') visualization_index = int(random.random() * len(self.train_dataloader)) vis_img, vis_tgt, vis_out = None, None, None", "target = sample['image'], sample['label'] image, target = image.cuda(), target.cuda() self.optimizer.zero_grad() output = self.model(image)", "ret_list def load_best_checkpoint(self): checkpoint = self.saver.load_checkpoint() self.model.load_state_dict(checkpoint['state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer']) print(f'=> loaded checkpoint - epoch", "+ image.data.shape[0])) print('Loss: %.3f' % train_loss) print('BestPred: %.3f' % self.best_pred) def validation(self, epoch,", "/ (i + 1))) self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch) if i", "class_weights, saver): self.args = args self.saver = saver self.saver.save_experiment_config() self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size,", "vis_img, vis_tgt, vis_out = image, target, output self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch) if", "tqdm(self.val_dataloader, desc='\\r') test_loss = 0.0 visualization_index = int(random.random() * len(self.val_dataloader)) vis_img, vis_tgt, vis_out" ]
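A minimal driver sketch for the Trainer above, for orientation only. Only the constructor signature, training, validation, lr_scheduler, and load_best_checkpoint come from the class itself; the epoch count (args.epochs) and the final test call are assumptions.

# Hypothetical training driver (sketch; `args.epochs` is an assumed field).
trainer = Trainer(args, model, train_set, val_set, test_set, class_weights, saver)
for epoch in range(args.epochs):
    trainer.training(epoch)
    trainer.validation(epoch)            # logs metrics and checkpoints the best mIoU
    if trainer.lr_scheduler is not None:
        trainer.lr_scheduler.step()      # advance the MultiStepLR schedule, if enabled
start_epoch = trainer.load_best_checkpoint()
test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU = trainer.validation(start_epoch, test=True)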
[ "get_schedule(page: str): if not page.isdecimal(): return False variables = {\"page\": page} return variables", "def get_schedule(page: str): if not page.isdecimal(): return False variables = {\"page\": page} return", "<gh_stars>0 def get_schedule(page: str): if not page.isdecimal(): return False variables = {\"page\": page}" ]
[ "by CarveMe' Lreu_ca.id = 'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t')", "bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df,", "= gen.id.replace('G_','') # combine met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml')", "change according the report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized) Lreu_ca_standardlized = CarveMe_processing(Lreu_ca_standardlized) cobra.io.save_json_model(Lreu_ca_standardlized, 'CarveMe/Lreu_ca.json') cobra.io.save_json_model(Lreu_ca_gp_standardlized,", "utf-8 -*- # Created by lhao at 2019-05-17 ''' input: L.reuteri protein sequence", "of L reuteri by CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description =", "= cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM of L reuteri by CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp'", "report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM of L reuteri", "# %% <Manual change according the report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized) Lreu_ca_standardlized = CarveMe_processing(Lreu_ca_standardlized)", "'G_id' for gen in covermemodel.genes: gen.id = gen.id.replace('G_','') # combine met according report", "= pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df,", "bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report", "Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM of L reuteri by CarveMe' Lreu_ca_gp.id =", "= pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report =", "Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df,", "'other' #'first' or 'other' # %% <build> if case =='frist': #Gram positive os.system('carve", "Lreu_ca_gp.description = 'GEM of L reuteri by CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca =", "= My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, compartment='_') # %% <Manual change according the report> Lreu_ca_gp_standardlized", "'other' # %% <build> if case =='frist': #Gram positive os.system('carve Lreuteri_biogaia_v03.faa --cobra -u", "My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp = 
cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM of L reuteri by", "case = 'other' #'first' or 'other' # %% <build> if case =='frist': #Gram", "report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized) Lreu_ca_standardlized = CarveMe_processing(Lreu_ca_standardlized) cobra.io.save_json_model(Lreu_ca_standardlized, 'CarveMe/Lreu_ca.json') cobra.io.save_json_model(Lreu_ca_gp_standardlized, 'CarveMe/Lreu_ca_gp.json') #My_def.io_outtxt(Lreu_ca,'CarveMe/Lreu_ca.txt',True) #My_def.io_outtxt(Lreu_ca_gp,'CarveMe/Lreu_ca_gp.txt',True)", "CarveMe_processing(covermemodel): #change gene id 'G_id' for gen in covermemodel.genes: gen.id = gen.id.replace('G_','') #", "import cobra import My_def import pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other' #'first'", "at 2019-05-17 ''' input: L.reuteri protein sequence output: draft model ''' import os", "sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized,", "positive os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa --cobra -o", "-*- # Created by lhao at 2019-05-17 ''' input: L.reuteri protein sequence output:", "return covermemodel Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM of L reuteri by CarveMe'", "gen in covermemodel.genes: gen.id = gen.id.replace('G_','') # combine met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return", "def CarveMe_processing(covermemodel): #change gene id 'G_id' for gen in covermemodel.genes: gen.id = gen.id.replace('G_','')", "coding: utf-8 -*- # Created by lhao at 2019-05-17 ''' input: L.reuteri protein", "''' input: L.reuteri protein sequence output: draft model ''' import os import cobra", "-u grampos -o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml'); # %% <standstandardlization>", "pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp,", "as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other' #'first' or 'other' # %% <build> if", "output: draft model ''' import os import cobra import My_def import pandas as", "= My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, compartment='_') #", "My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, compartment='_') # %%", "= 'GEM of L reuteri by CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml')", "met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM of", 
"os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other' #'first' or 'other' # %% <build> if case =='frist':", "= 'GEM of L reuteri by CarveMe' Lreu_ca.id = 'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv',", "grampos -o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml'); # %% <standstandardlization> def", "for gen in covermemodel.genes: gen.id = gen.id.replace('G_','') # combine met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel)", "<Manual change according the report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized) Lreu_ca_standardlized = CarveMe_processing(Lreu_ca_standardlized) cobra.io.save_json_model(Lreu_ca_standardlized, 'CarveMe/Lreu_ca.json')", "os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml');", "cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM of L reuteri by CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca", "CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM of L reuteri", "=='frist': #Gram positive os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa", "'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM of L reuteri by CarveMe' Lreu_ca.id", "'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca,", "gene id 'G_id' for gen in covermemodel.genes: gen.id = gen.id.replace('G_','') # combine met", "#all os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml'); # %% <standstandardlization> def CarveMe_processing(covermemodel): #change gene", "bigg_met_df, compartment='_') # %% <Manual change according the report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized) Lreu_ca_standardlized", "CarveMe' Lreu_ca.id = 'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized,", "CarveMe/Lreu_ca.xml'); # %% <standstandardlization> def CarveMe_processing(covermemodel): #change gene id 'G_id' for gen in", "cobra import My_def import pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other' #'first' or", "according the report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized) Lreu_ca_standardlized = CarveMe_processing(Lreu_ca_standardlized) cobra.io.save_json_model(Lreu_ca_standardlized, 'CarveMe/Lreu_ca.json') cobra.io.save_json_model(Lreu_ca_gp_standardlized, 'CarveMe/Lreu_ca_gp.json')", "if case =='frist': #Gram positive os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml'); #all", "reuteri by CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM of", "ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, 
compartment='_')", "--cobra -o CarveMe/Lreu_ca.xml'); # %% <standstandardlization> def CarveMe_processing(covermemodel): #change gene id 'G_id' for", "model ''' import os import cobra import My_def import pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/')", "ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, compartment='_') # %% <Manual change according the report>", "cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM of L reuteri by CarveMe' Lreu_ca.id = 'Lreu_ca' bigg_rea_df", "Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM of L reuteri by CarveMe' Lreu_ca.id =", "= 'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM of L reuteri by CarveMe'", "L reuteri by CarveMe' Lreu_ca.id = 'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df =", "by CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM of L", "Created by lhao at 2019-05-17 ''' input: L.reuteri protein sequence output: draft model", "Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM of L reuteri by", "#Gram positive os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa --cobra", "#change gene id 'G_id' for gen in covermemodel.genes: gen.id = gen.id.replace('G_','') # combine", "import My_def import pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other' #'first' or 'other'", "of L reuteri by CarveMe' Lreu_ca.id = 'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df", "id 'G_id' for gen in covermemodel.genes: gen.id = gen.id.replace('G_','') # combine met according", "by lhao at 2019-05-17 ''' input: L.reuteri protein sequence output: draft model '''", "sep='\\t') Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df,", "#'first' or 'other' # %% <build> if case =='frist': #Gram positive os.system('carve Lreuteri_biogaia_v03.faa", "covermemodel Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM of L reuteri by CarveMe' Lreu_ca_gp.id", "input: L.reuteri protein sequence output: draft model ''' import os import cobra import", "gen.id = gen.id.replace('G_','') # combine met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp =", "# Created by lhao at 2019-05-17 ''' input: L.reuteri protein sequence output: draft", "in covermemodel.genes: gen.id = gen.id.replace('G_','') # combine met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel", "bigg_rea_df, bigg_met_df, compartment='_') # %% <Manual change according the report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized)", "python # -*- coding: utf-8 -*- # Created by lhao at 2019-05-17 '''", "pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other' #'first' or 'other' # %% <build> if case", "protein sequence output: draft model ''' import os import cobra import My_def import", "Lreuteri_biogaia_v03.faa 
--cobra -u grampos -o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml'); #", "lhao at 2019-05-17 ''' input: L.reuteri protein sequence output: draft model ''' import", "'GEM of L reuteri by CarveMe' Lreu_ca.id = 'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t')", "Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, compartment='_') # %% <Manual change according the", "%% <Manual change according the report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized) Lreu_ca_standardlized = CarveMe_processing(Lreu_ca_standardlized) cobra.io.save_json_model(Lreu_ca_standardlized,", "pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other' #'first' or 'other' # %% <build>", "2019-05-17 ''' input: L.reuteri protein sequence output: draft model ''' import os import", "Lreu_ca.description = 'GEM of L reuteri by CarveMe' Lreu_ca.id = 'Lreu_ca' bigg_rea_df =", "Lreu_ca.id = 'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report", "sequence output: draft model ''' import os import cobra import My_def import pandas", "--cobra -u grampos -o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml'); # %%", "'GEM of L reuteri by CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description", "Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml'); # %% <standstandardlization> def CarveMe_processing(covermemodel): #change gene id 'G_id'", "bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, compartment='_') # %% <Manual change", "%% <standstandardlization> def CarveMe_processing(covermemodel): #change gene id 'G_id' for gen in covermemodel.genes: gen.id", "bigg_rea_df, bigg_met_df, compartment='_') Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, compartment='_') # %% <Manual", "-o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml'); # %% <standstandardlization> def CarveMe_processing(covermemodel):", "gen.id.replace('G_','') # combine met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description", "os import cobra import My_def import pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other'", "covermemodel.genes: gen.id = gen.id.replace('G_','') # combine met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp", "#!/usr/bin/env python # -*- coding: utf-8 -*- # Created by lhao at 2019-05-17", "L.reuteri protein sequence output: draft model ''' import os import cobra import My_def", "CarveMe/Lreu_ca_gp.xml'); #all os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml'); # %% <standstandardlization> def CarveMe_processing(covermemodel): #change", "# %% <standstandardlization> def CarveMe_processing(covermemodel): #change gene id 'G_id' for gen in covermemodel.genes:", "import os import cobra import 
My_def import pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case =", "according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM of L", "draft model ''' import os import cobra import My_def import pandas as pd", "reuteri by CarveMe' Lreu_ca.id = 'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv',", "= 'other' #'first' or 'other' # %% <build> if case =='frist': #Gram positive", "os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml'); # %% <standstandardlization> def CarveMe_processing(covermemodel): #change gene id", "My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, compartment='_') # %% <Manual change according the report> Lreu_ca_gp_standardlized =", "My_def import pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other' #'first' or 'other' #", "<build> if case =='frist': #Gram positive os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml');", "compartment='_') # %% <Manual change according the report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized) Lreu_ca_standardlized =", "= cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM of L reuteri by CarveMe' Lreu_ca.id = 'Lreu_ca'", "# %% <build> if case =='frist': #Gram positive os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos", "case =='frist': #Gram positive os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml'); #all os.system('carve", "# -*- coding: utf-8 -*- # Created by lhao at 2019-05-17 ''' input:", "compartment='_') Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df, compartment='_') # %% <Manual change according", "''' import os import cobra import My_def import pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case", "the report> Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized) Lreu_ca_standardlized = CarveMe_processing(Lreu_ca_standardlized) cobra.io.save_json_model(Lreu_ca_standardlized, 'CarveMe/Lreu_ca.json') cobra.io.save_json_model(Lreu_ca_gp_standardlized, 'CarveMe/Lreu_ca_gp.json') #My_def.io_outtxt(Lreu_ca,'CarveMe/Lreu_ca.txt',True)", "L reuteri by CarveMe' Lreu_ca_gp.id = 'Lreu_ca_gp' Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml') Lreu_ca.description = 'GEM", "or 'other' # %% <build> if case =='frist': #Gram positive os.system('carve Lreuteri_biogaia_v03.faa --cobra", "-o CarveMe/Lreu_ca.xml'); # %% <standstandardlization> def CarveMe_processing(covermemodel): #change gene id 'G_id' for gen", "# combine met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description =", "-*- coding: utf-8 -*- # Created by lhao at 2019-05-17 ''' input: L.reuteri", "= 'Lreu_ca' bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report =", "pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_standardlized, ca_report = 
My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_')", "import pandas as pd os.chdir('../../ComplementaryData/Step_02_DraftModels/') case = 'other' #'first' or 'other' # %%", "combine met according report My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel) return covermemodel Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml') Lreu_ca_gp.description = 'GEM", "%% <build> if case =='frist': #Gram positive os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o", "<standstandardlization> def CarveMe_processing(covermemodel): #change gene id 'G_id' for gen in covermemodel.genes: gen.id =" ]
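A minimal sketch of how the saved drafts could be sanity-checked afterwards, assuming the JSON paths written above; load_json_model and optimize are standard cobrapy calls, but this check is not part of the original pipeline.

import cobra

for path in ('CarveMe/Lreu_ca.json', 'CarveMe/Lreu_ca_gp.json'):
    model = cobra.io.load_json_model(path)
    solution = model.optimize()
    print(f'{model.id}: {len(model.reactions)} reactions, '
          f'{len(model.metabolites)} metabolites, '
          f'objective = {solution.objective_value:.3f}')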
[ "cal_rf_wh(grad_input): binary_map: np.ndarray = (grad_input[:, :] > 0.0) x_cs: np.ndarray = binary_map.sum(-1) >=", "module.eval() input = torch.ones(1, 3, 640, 640, requires_grad= True) model.zero_grad() features = model(input)", "#grad_input = grad_input * 100 width, height = cal_rf_wh(grad_input) print(\"width:\", width, \"height:\", height)", "binary_map.sum(0) >= 1 width = x_cs.sum() height = y_cs.sum() return (width, height) def", "g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :, h, w] = 1 x.backward(g_x, retain_graph = True) #", "for i in range(len(features)): # if i != len(features)-1: # continue x =", "except Exception as e: pass if type(module) is nn.BatchNorm2d: module.eval() input = torch.ones(1,", "0) width, height = cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2) grad_input", "True) model.zero_grad() features = model(input) for i in range(len(features)): # if i !=", "continue x = features[i] #g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]]) g_x = torch.zeros_like(x)", "model.zero_grad() features = model(input) for i in range(len(features)): # if i != len(features)-1:", ":] > 0.0) x_cs: np.ndarray = binary_map.sum(-1) >= 1 y_cs: np.ndarray = binary_map.sum(0)", "binary_map.sum(-1) >= 1 y_cs: np.ndarray = binary_map.sum(0) >= 1 width = x_cs.sum() height", "grad_input.mean(0).mean(0) # 有效感受野 0.75 - 0.85 #grad_input = np.where(grad_input > 0.85,1,0) #grad_input_ =", "def show(input, i): grad_input = np.abs(input.grad.data.numpy()) grad_input = grad_input / np.max(grad_input) grad_input =", "torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input, i) model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input): binary_map: np.ndarray", "x = torch.mean(x, 1, keepdim=True) # fake_fp = x * g_x[0, 0, ...]", "= np.where(grad_input > 0.85,1,0) #grad_input_ = np.where(grad_input > 0.75, 1, grad_input) # effient_values", "\"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2) grad_input = (grad_input * 255).astype(np.uint8) cv.imshow(\"receip_field\"+str(i), grad_input) #cv.imwrite(\"./receip_field\"+str(i)+\".png\",", "height = cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2) grad_input = (grad_input", "# fake_fp = x * g_x[0, 0, ...] # fake_loss = torch.mean(fake_fp) #", "> 0.0) x_cs: np.ndarray = binary_map.sum(-1) >= 1 y_cs: np.ndarray = binary_map.sum(0) >=", "= True) # x = torch.mean(x, 1, keepdim=True) # fake_fp = x *", "grad_input > 0.0 # samll_effient_values = grad_input <= 0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)] =", "# fake_loss = torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input, i) model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def", "range(len(features)): # if i != len(features)-1: # continue x = features[i] #g_x =", "# if i != len(features)-1: # continue x = features[i] #g_x = torch.zeros(size=[1,", "* g_x[0, 0, ...] 
# fake_loss = torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input, i) model.zero_grad()", "有效感受野 0.75 - 0.85 #grad_input = np.where(grad_input > 0.85,1,0) #grad_input_ = np.where(grad_input >", "def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module in model.modules(): try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var)", "w] = 1 x.backward(g_x, retain_graph = True) # x = torch.mean(x, 1, keepdim=True)", "0, ...] # fake_loss = torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input, i) model.zero_grad() input.grad.data.zero_() cv.waitKey(2000)", "height = y_cs.sum() return (width, height) def show(input, i): grad_input = np.abs(input.grad.data.numpy()) grad_input", "y_cs: np.ndarray = binary_map.sum(0) >= 1 width = x_cs.sum() height = y_cs.sum() return", "x * g_x[0, 0, ...] # fake_loss = torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input, i)", "# continue x = features[i] #g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]]) g_x =", "* 100 width, height = cal_rf_wh(grad_input) print(\"width:\", width, \"height:\", height) grad_input_ERF = np.where(grad_input>0.01,", "requires_grad= True) model.zero_grad() features = model(input) for i in range(len(features)): # if i", "= grad_input * 100 width, height = cal_rf_wh(grad_input) print(\"width:\", width, \"height:\", height) grad_input_ERF", "= grad_input > 0.0 # samll_effient_values = grad_input <= 0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)]", "# fake_loss.backward(retain_graph=True) show(input, i) model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input): binary_map: np.ndarray =", "= torch.ones(1, 3, 640, 640, requires_grad= True) model.zero_grad() features = model(input) for i", "grad_input) # effient_values = grad_input > 0.0 # samll_effient_values = grad_input <= 0.2", "print(\"width:\", width, \"height:\", height) grad_input_ERF = np.where(grad_input>0.01, 1, 0) width, height = cal_rf_wh(grad_input_ERF)", "grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1 #grad_input = grad_input * 100 width, height = cal_rf_wh(grad_input)", "nn.BatchNorm2d: module.eval() input = torch.ones(1, 3, 640, 640, requires_grad= True) model.zero_grad() features =", "1, grad_input) # effient_values = grad_input > 0.0 # samll_effient_values = grad_input <=", "import cv2 as cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module in model.modules(): try: nn.init.constant_(module.weight,", "# effient_values = grad_input > 0.0 # samll_effient_values = grad_input <= 0.2 #", "= binary_map.sum(0) >= 1 width = x_cs.sum() height = y_cs.sum() return (width, height)", "0.1 #grad_input = grad_input * 100 width, height = cal_rf_wh(grad_input) print(\"width:\", width, \"height:\",", "# grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1 #grad_input = grad_input * 100 width, height =", "= y_cs.sum() return (width, height) def show(input, i): grad_input = np.abs(input.grad.data.numpy()) grad_input =", "x = features[i] #g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]]) g_x = torch.zeros_like(x) h,", "h, w] = 1 x.backward(g_x, retain_graph = True) # x = torch.mean(x, 1,", "= np.where(grad_input>0.01, 1, 0) width, height = cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\", height) np.expand_dims(grad_input,", "= torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input, i) 
model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input): binary_map:", "e: pass if type(module) is nn.BatchNorm2d: module.eval() input = torch.ones(1, 3, 640, 640,", "...] # fake_loss = torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input, i) model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0)", "= g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :, h, w] = 1 x.backward(g_x, retain_graph = True)", "= torch.zeros(size=[1, 1, x.shape[2], x.shape[3]]) g_x = torch.zeros_like(x) h, w = g_x.shape[2]//2, g_x.shape[3]//2", "cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2) grad_input = (grad_input * 255).astype(np.uint8)", "features[i] #g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]]) g_x = torch.zeros_like(x) h, w =", "model(input) for i in range(len(features)): # if i != len(features)-1: # continue x", "0.0) x_cs: np.ndarray = binary_map.sum(-1) >= 1 y_cs: np.ndarray = binary_map.sum(0) >= 1", "100 width, height = cal_rf_wh(grad_input) print(\"width:\", width, \"height:\", height) grad_input_ERF = np.where(grad_input>0.01, 1,", "> 0.75, 1, grad_input) # effient_values = grad_input > 0.0 # samll_effient_values =", "(grad_input[:, :] > 0.0) x_cs: np.ndarray = binary_map.sum(-1) >= 1 y_cs: np.ndarray =", "model.modules(): try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception as e: pass if", "g_x = torch.zeros_like(x) h, w = g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :, h, w] =", "fake_loss = torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input, i) model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input):", "h, w = g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :, h, w] = 1 x.backward(g_x, retain_graph", "= binary_map.sum(-1) >= 1 y_cs: np.ndarray = binary_map.sum(0) >= 1 width = x_cs.sum()", "nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception as e: pass if type(module) is", "import torch import numpy as np import cv2 as cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野", "= 0.1 #grad_input = grad_input * 100 width, height = cal_rf_wh(grad_input) print(\"width:\", width,", "width, \"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2) grad_input = (grad_input * 255).astype(np.uint8) cv.imshow(\"receip_field\"+str(i), grad_input)", "> 0.0 # samll_effient_values = grad_input <= 0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1", "np.ndarray = binary_map.sum(0) >= 1 width = x_cs.sum() height = y_cs.sum() return (width,", "if type(module) is nn.BatchNorm2d: module.eval() input = torch.ones(1, 3, 640, 640, requires_grad= True)", "np.ndarray = (grad_input[:, :] > 0.0) x_cs: np.ndarray = binary_map.sum(-1) >= 1 y_cs:", "nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception as e: pass if type(module) is nn.BatchNorm2d: module.eval() input", "return (width, height) def show(input, i): grad_input = np.abs(input.grad.data.numpy()) grad_input = grad_input /", "0.75 - 0.85 #grad_input = np.where(grad_input > 0.85,1,0) #grad_input_ = np.where(grad_input > 0.75,", "#g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]]) g_x = torch.zeros_like(x) h, w = g_x.shape[2]//2,", "1, 0) width, height = 
cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2)", "#grad_input = np.where(grad_input > 0.85,1,0) #grad_input_ = np.where(grad_input > 0.75, 1, grad_input) #", "try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception as e: pass if type(module)", "0.85 #grad_input = np.where(grad_input > 0.85,1,0) #grad_input_ = np.where(grad_input > 0.75, 1, grad_input)", "> 0.85,1,0) #grad_input_ = np.where(grad_input > 0.75, 1, grad_input) # effient_values = grad_input", "input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input): binary_map: np.ndarray = (grad_input[:, :] > 0.0) x_cs:", "for module in model.modules(): try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception as", "= grad_input.mean(0).mean(0) # 有效感受野 0.75 - 0.85 #grad_input = np.where(grad_input > 0.85,1,0) #grad_input_", "640, 640, requires_grad= True) model.zero_grad() features = model(input) for i in range(len(features)): #", "# 有效感受野 0.75 - 0.85 #grad_input = np.where(grad_input > 0.85,1,0) #grad_input_ = np.where(grad_input", "input = torch.ones(1, 3, 640, 640, requires_grad= True) model.zero_grad() features = model(input) for", "grad_input <= 0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1 #grad_input = grad_input * 100", "torch.ones(1, 3, 640, 640, requires_grad= True) model.zero_grad() features = model(input) for i in", "# samll_effient_values = grad_input <= 0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1 #grad_input =", "0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1 #grad_input = grad_input * 100 width, height", "numpy as np import cv2 as cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module in", "nn import torch import numpy as np import cv2 as cv def calculate_EPR(model):", "width, \"height:\", height) grad_input_ERF = np.where(grad_input>0.01, 1, 0) width, height = cal_rf_wh(grad_input_ERF) print(\"ERF_width:\",", "grad_input_ERF = np.where(grad_input>0.01, 1, 0) width, height = cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\", height)", "torch import numpy as np import cv2 as cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for", "show(input, i) model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input): binary_map: np.ndarray = (grad_input[:, :]", "- 0.85 #grad_input = np.where(grad_input > 0.85,1,0) #grad_input_ = np.where(grad_input > 0.75, 1,", "nn.init.ones_(module.running_var) except Exception as e: pass if type(module) is nn.BatchNorm2d: module.eval() input =", "#grad_input_ = np.where(grad_input > 0.75, 1, grad_input) # effient_values = grad_input > 0.0", "0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception as e: pass if type(module) is nn.BatchNorm2d:", "1 y_cs: np.ndarray = binary_map.sum(0) >= 1 width = x_cs.sum() height = y_cs.sum()", "np.where(grad_input>0.01, 1, 0) width, height = cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3,", "y_cs.sum() return (width, height) def show(input, i): grad_input = np.abs(input.grad.data.numpy()) grad_input = grad_input", "def cal_rf_wh(grad_input): binary_map: 
np.ndarray = (grad_input[:, :] > 0.0) x_cs: np.ndarray = binary_map.sum(-1)", "\"height:\", height) grad_input_ERF = np.where(grad_input>0.01, 1, 0) width, height = cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width,", "i): grad_input = np.abs(input.grad.data.numpy()) grad_input = grad_input / np.max(grad_input) grad_input = grad_input.mean(0).mean(0) #", "= cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2) grad_input = (grad_input *", "len(features)-1: # continue x = features[i] #g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]]) g_x", "import numpy as np import cv2 as cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module", "= features[i] #g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]]) g_x = torch.zeros_like(x) h, w", "cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module in model.modules(): try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean)", "features = model(input) for i in range(len(features)): # if i != len(features)-1: #", "1, keepdim=True) # fake_fp = x * g_x[0, 0, ...] # fake_loss =", "keepdim=True) # fake_fp = x * g_x[0, 0, ...] # fake_loss = torch.mean(fake_fp)", "x.shape[2], x.shape[3]]) g_x = torch.zeros_like(x) h, w = g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :, h,", "0.75, 1, grad_input) # effient_values = grad_input > 0.0 # samll_effient_values = grad_input", "g_x[0, 0, ...] # fake_loss = torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input, i) model.zero_grad() input.grad.data.zero_()", "is nn.BatchNorm2d: module.eval() input = torch.ones(1, 3, 640, 640, requires_grad= True) model.zero_grad() features", "= torch.zeros_like(x) h, w = g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :, h, w] = 1", "fake_fp = x * g_x[0, 0, ...] 
# fake_loss = torch.mean(fake_fp) # fake_loss.backward(retain_graph=True)", "(width, height) def show(input, i): grad_input = np.abs(input.grad.data.numpy()) grad_input = grad_input / np.max(grad_input)", "0.85,1,0) #grad_input_ = np.where(grad_input > 0.75, 1, grad_input) # effient_values = grad_input >", "nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception as e: pass if type(module) is nn.BatchNorm2d: module.eval()", "g_x.shape[3]//2 g_x[:, :, h, w] = 1 x.backward(g_x, retain_graph = True) # x", "np.where(grad_input > 0.85,1,0) #grad_input_ = np.where(grad_input > 0.75, 1, grad_input) # effient_values =", "= x_cs.sum() height = y_cs.sum() return (width, height) def show(input, i): grad_input =", "= (grad_input[:, :] > 0.0) x_cs: np.ndarray = binary_map.sum(-1) >= 1 y_cs: np.ndarray", ":, h, w] = 1 x.backward(g_x, retain_graph = True) # x = torch.mean(x,", "np.where(grad_input > 0.75, 1, grad_input) # effient_values = grad_input > 0.0 # samll_effient_values", "in model.modules(): try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception as e: pass", "width = x_cs.sum() height = y_cs.sum() return (width, height) def show(input, i): grad_input", "grad_input = grad_input / np.max(grad_input) grad_input = grad_input.mean(0).mean(0) # 有效感受野 0.75 - 0.85", "type(module) is nn.BatchNorm2d: module.eval() input = torch.ones(1, 3, 640, 640, requires_grad= True) model.zero_grad()", "# x = torch.mean(x, 1, keepdim=True) # fake_fp = x * g_x[0, 0,", "3, 640, 640, requires_grad= True) model.zero_grad() features = model(input) for i in range(len(features)):", ">= 1 y_cs: np.ndarray = binary_map.sum(0) >= 1 width = x_cs.sum() height =", "samll_effient_values = grad_input <= 0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1 #grad_input = grad_input", "samll_effient_values)] = 0.1 #grad_input = grad_input * 100 width, height = cal_rf_wh(grad_input) print(\"width:\",", "as nn import torch import numpy as np import cv2 as cv def", "= grad_input <= 0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1 #grad_input = grad_input *", "cv2 as cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module in model.modules(): try: nn.init.constant_(module.weight, 0.05)", "cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input): binary_map: np.ndarray = (grad_input[:, :] > 0.0) x_cs: np.ndarray", "pass if type(module) is nn.BatchNorm2d: module.eval() input = torch.ones(1, 3, 640, 640, requires_grad=", "True) # x = torch.mean(x, 1, keepdim=True) # fake_fp = x * g_x[0,", "0.0 # samll_effient_values = grad_input <= 0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1 #grad_input", "w = g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :, h, w] = 1 x.backward(g_x, retain_graph =", "= cal_rf_wh(grad_input) print(\"width:\", width, \"height:\", height) grad_input_ERF = np.where(grad_input>0.01, 1, 0) width, height", "print(\"ERF_width:\", width, \"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2) grad_input = (grad_input * 255).astype(np.uint8) cv.imshow(\"receip_field\"+str(i),", "!= len(features)-1: # continue x = features[i] #g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]])", "model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input): binary_map: np.ndarray = (grad_input[:, :] > 0.0)", "grad_input / np.max(grad_input) 
grad_input = grad_input.mean(0).mean(0) # 有效感受野 0.75 - 0.85 #grad_input =", "height) grad_input_ERF = np.where(grad_input>0.01, 1, 0) width, height = cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\",", "= grad_input / np.max(grad_input) grad_input = grad_input.mean(0).mean(0) # 有效感受野 0.75 - 0.85 #grad_input", "x_cs: np.ndarray = binary_map.sum(-1) >= 1 y_cs: np.ndarray = binary_map.sum(0) >= 1 width", "i != len(features)-1: # continue x = features[i] #g_x = torch.zeros(size=[1, 1, x.shape[2],", "calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module in model.modules(): try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except", "i) model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input): binary_map: np.ndarray = (grad_input[:, :] >", "effient_values = grad_input > 0.0 # samll_effient_values = grad_input <= 0.2 # grad_input[np.logical_and(effient_values,", "i in range(len(features)): # if i != len(features)-1: # continue x = features[i]", "#TODO:尝试通过加载预训练权重计算有效感受野 for module in model.modules(): try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception", "width, height = cal_rf_wh(grad_input) print(\"width:\", width, \"height:\", height) grad_input_ERF = np.where(grad_input>0.01, 1, 0)", "g_x[:, :, h, w] = 1 x.backward(g_x, retain_graph = True) # x =", "fake_loss.backward(retain_graph=True) show(input, i) model.zero_grad() input.grad.data.zero_() cv.waitKey(2000) cv.waitKey(0) def cal_rf_wh(grad_input): binary_map: np.ndarray = (grad_input[:,", ">= 1 width = x_cs.sum() height = y_cs.sum() return (width, height) def show(input,", "height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2) grad_input = (grad_input * 255).astype(np.uint8) cv.imshow(\"receip_field\"+str(i), grad_input) #cv.imwrite(\"./receip_field\"+str(i)+\".png\", grad_input)", "torch.zeros(size=[1, 1, x.shape[2], x.shape[3]]) g_x = torch.zeros_like(x) h, w = g_x.shape[2]//2, g_x.shape[3]//2 g_x[:,", "grad_input = np.abs(input.grad.data.numpy()) grad_input = grad_input / np.max(grad_input) grad_input = grad_input.mean(0).mean(0) # 有效感受野", "<= 0.2 # grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1 #grad_input = grad_input * 100 width,", "as np import cv2 as cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module in model.modules():", "torch.zeros_like(x) h, w = g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :, h, w] = 1 x.backward(g_x,", "retain_graph = True) # x = torch.mean(x, 1, keepdim=True) # fake_fp = x", "= x * g_x[0, 0, ...] 
# fake_loss = torch.mean(fake_fp) # fake_loss.backward(retain_graph=True) show(input,", "cv.waitKey(0) def cal_rf_wh(grad_input): binary_map: np.ndarray = (grad_input[:, :] > 0.0) x_cs: np.ndarray =", "640, requires_grad= True) model.zero_grad() features = model(input) for i in range(len(features)): # if", "x_cs.sum() height = y_cs.sum() return (width, height) def show(input, i): grad_input = np.abs(input.grad.data.numpy())", "np.abs(input.grad.data.numpy()) grad_input = grad_input / np.max(grad_input) grad_input = grad_input.mean(0).mean(0) # 有效感受野 0.75 -", "/ np.max(grad_input) grad_input = grad_input.mean(0).mean(0) # 有效感受野 0.75 - 0.85 #grad_input = np.where(grad_input", "grad_input * 100 width, height = cal_rf_wh(grad_input) print(\"width:\", width, \"height:\", height) grad_input_ERF =", "Exception as e: pass if type(module) is nn.BatchNorm2d: module.eval() input = torch.ones(1, 3,", "as e: pass if type(module) is nn.BatchNorm2d: module.eval() input = torch.ones(1, 3, 640,", "= np.where(grad_input > 0.75, 1, grad_input) # effient_values = grad_input > 0.0 #", "height = cal_rf_wh(grad_input) print(\"width:\", width, \"height:\", height) grad_input_ERF = np.where(grad_input>0.01, 1, 0) width,", "cal_rf_wh(grad_input) print(\"width:\", width, \"height:\", height) grad_input_ERF = np.where(grad_input>0.01, 1, 0) width, height =", "grad_input = grad_input.mean(0).mean(0) # 有效感受野 0.75 - 0.85 #grad_input = np.where(grad_input > 0.85,1,0)", "import torch.nn as nn import torch import numpy as np import cv2 as", "if i != len(features)-1: # continue x = features[i] #g_x = torch.zeros(size=[1, 1,", "= 1 x.backward(g_x, retain_graph = True) # x = torch.mean(x, 1, keepdim=True) #", "module in model.modules(): try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias) nn.init.zeros_(module.running_mean) nn.init.ones_(module.running_var) except Exception as e:", "np import cv2 as cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module in model.modules(): try:", "show(input, i): grad_input = np.abs(input.grad.data.numpy()) grad_input = grad_input / np.max(grad_input) grad_input = grad_input.mean(0).mean(0)", "width, height = cal_rf_wh(grad_input_ERF) print(\"ERF_width:\", width, \"ERF_height:\", height) np.expand_dims(grad_input, axis=2).repeat(3, axis=2) grad_input =", "= torch.mean(x, 1, keepdim=True) # fake_fp = x * g_x[0, 0, ...] 
#", "1, x.shape[2], x.shape[3]]) g_x = torch.zeros_like(x) h, w = g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :,", "= np.abs(input.grad.data.numpy()) grad_input = grad_input / np.max(grad_input) grad_input = grad_input.mean(0).mean(0) # 有效感受野 0.75", "= model(input) for i in range(len(features)): # if i != len(features)-1: # continue", "as cv def calculate_EPR(model): #TODO:尝试通过加载预训练权重计算有效感受野 for module in model.modules(): try: nn.init.constant_(module.weight, 0.05) nn.init.zeros_(module.bias)", "height) def show(input, i): grad_input = np.abs(input.grad.data.numpy()) grad_input = grad_input / np.max(grad_input) grad_input", "in range(len(features)): # if i != len(features)-1: # continue x = features[i] #g_x", "x.shape[3]]) g_x = torch.zeros_like(x) h, w = g_x.shape[2]//2, g_x.shape[3]//2 g_x[:, :, h, w]", "1 width = x_cs.sum() height = y_cs.sum() return (width, height) def show(input, i):", "binary_map: np.ndarray = (grad_input[:, :] > 0.0) x_cs: np.ndarray = binary_map.sum(-1) >= 1", "torch.nn as nn import torch import numpy as np import cv2 as cv", "np.ndarray = binary_map.sum(-1) >= 1 y_cs: np.ndarray = binary_map.sum(0) >= 1 width =", "1 x.backward(g_x, retain_graph = True) # x = torch.mean(x, 1, keepdim=True) # fake_fp", "torch.mean(x, 1, keepdim=True) # fake_fp = x * g_x[0, 0, ...] # fake_loss", "np.max(grad_input) grad_input = grad_input.mean(0).mean(0) # 有效感受野 0.75 - 0.85 #grad_input = np.where(grad_input >", "x.backward(g_x, retain_graph = True) # x = torch.mean(x, 1, keepdim=True) # fake_fp =" ]
[ "mypy expects object to be here at the last index ('we skip \"object\"", "def fold_intersection_and_its_args(self, type_: mypy.types.Type) -> mypy.types.Type: folded_type = self.fold_intersection(type_) if isinstance(folded_type, mypy.types.Instance): folded_type.args", "def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver() signature = context.default_signature signature.ret_type =", "import mypy.errorcodes import mypy.errors import mypy.nodes import mypy.options import mypy.plugin import mypy.types if", "import TypeGuard else: # pragma: no cover from typing_extensions import TypeGuard SignatureContext =", "mypy.types.Type]]: if fullname == \"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname) return None def get_method_signature_hook( self, fullname:", "arg) return intersection_type_info_wrapper @staticmethod def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None: name_expr =", "defn.info.is_protocol = True type_info = mypy.nodes.TypeInfo( names=symbol_table if symbol_table is not None else", "-> int: # pylint: disable=useless-super-delegation return super().__hash__() def mk_protocol_intersection_typeinfo( name: str, *, #", "isinstance(folded_type, mypy.types.Instance): folded_type.args = tuple(self.fold_intersection(t) for t in folded_type.args) return folded_type def fold_intersection(self,", "# pylint: disable=unused-argument def get_type_analyze_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]: if", "type.\"\"\" def __eq__(self, x: object) -> bool: if isinstance(x, IncomparableTypeName): return False return", "ProtocolIntersections to not be treated as the same type, but just as protocols,", "intersections_to_process = deque([type_]) while intersections_to_process: intersection = intersections_to_process.popleft() for arg in intersection.args: if", "= deque([type_]) while intersections_to_process: intersection = intersections_to_process.popleft() for arg in intersection.args: if self._is_intersection(arg):", "typ.type.fullname == ( \"typing_protocol_intersection.types.ProtocolIntersection\" ) def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver()", "module_name=\"typing_protocol_intersection\", ) type_info.mro = [type_info] type_info.is_protocol = True return type_info class ProtocolIntersectionResolver: def", ") -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook def get_function_signature_hook( self, fullname: str ) ->", "to differ - that's it's an IncomparableTypeName. 
fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] = None,", "type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info, [])) args = [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes]", "in signature.arg_types] return signature def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext)", "mypy.types.Type: if not self._is_intersection(type_): return type_ type_info = mk_protocol_intersection_typeinfo( \"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), ) type_info_wrapper", "import Callable, Optional import mypy.errorcodes import mypy.errors import mypy.nodes import mypy.options import mypy.plugin", "def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type: args = tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args) symbol_table", "import mypy.errors import mypy.nodes import mypy.options import mypy.plugin import mypy.types if sys.version_info >=", "def get_function_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook class TypeInfoWrapper(typing.NamedTuple):", "mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver() signature = context.default_signature signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types = [resolver.fold_intersection_and_its_args(t)", "folded_type.args) return folded_type def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type: if not self._is_intersection(type_): return", "signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types] return signature def", "import mypy.options import mypy.plugin import mypy.types if sys.version_info >= (3, 10): # pragma:", "returns True when compared (equality) with another instance of this type.\"\"\" def __eq__(self,", "signature = context.default_signature signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types]", "disable=unused-argument def get_type_analyze_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]: if fullname ==", "self, fullname: str ) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook class TypeInfoWrapper(typing.NamedTuple): type_info: mypy.nodes.TypeInfo", "fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), ) type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info, [])) args = [mypy.types.Instance(ti, []) for ti", "intersections_to_process.popleft() for arg in intersection.args: if self._is_intersection(arg): intersections_to_process.append(arg) continue if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper,", "mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], ) defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol = True type_info = mypy.nodes.TypeInfo(", "type, but just as protocols, # their fullnames need to differ - that's", "treated as the same type, but just as protocols, # their 
fullnames need", "None else mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\", ) type_info.mro = [type_info] type_info.is_protocol = True return", "implements it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], ) defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol = True type_info", "if isinstance(x, IncomparableTypeName): return False return super().__eq__(x) def __hash__(self) -> int: # pylint:", "False return super().__eq__(x) def __hash__(self) -> int: # pylint: disable=useless-super-delegation return super().__hash__() def", "_type_analyze_hook def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]: # ignore version argument if the plugin", "x: object) -> bool: if isinstance(x, IncomparableTypeName): return False return super().__eq__(x) def __hash__(self)", "pragma: no cover from typing_extensions import TypeGuard SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin):", "@staticmethod def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None: name_expr = mypy.nodes.NameExpr(typ.type.name) name_expr.node =", "type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type: args = tuple(context.api.analyze_type(arg_t)", "for arg_t in context.type.args) symbol_table = mypy.nodes.SymbolTable() for arg in args: if isinstance(arg,", "mypy.nodes.TypeInfo base_classes: typing.List[mypy.nodes.TypeInfo] class IncomparableTypeName(str): \"\"\"A string that never returns True when compared", "mypy.options import mypy.plugin import mypy.types if sys.version_info >= (3, 10): # pragma: no", "True when compared (equality) with another instance of this type.\"\"\" def __eq__(self, x:", "args = tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args) symbol_table = mypy.nodes.SymbolTable() for arg in", "type_: mypy.types.Type) -> mypy.types.Type: folded_type = self.fold_intersection(type_) if isinstance(folded_type, mypy.types.Instance): folded_type.args = tuple(self.fold_intersection(t)", "Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]: if fullname == \"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname) return None def get_method_signature_hook( self,", "defn=defn, module_name=\"typing_protocol_intersection\", ) type_info.mro = [type_info] type_info.is_protocol = True return type_info class ProtocolIntersectionResolver:", "type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper: intersections_to_process = deque([type_]) while intersections_to_process: intersection =", "name_expr) intersection_type_info_wrapper.type_info.mro.insert(0, typ.type) intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]: return isinstance(typ,", "args=args) def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper: intersections_to_process = deque([type_]) while", "mypy.errors import mypy.nodes import mypy.options import mypy.plugin import mypy.types if sys.version_info >= (3,", "base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy expects object to be here at the last index", "intersections_to_process: 
intersection = intersections_to_process.popleft() for arg in intersection.args: if self._is_intersection(arg): intersections_to_process.append(arg) continue if", "def get_type_analyze_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]: if fullname == \"typing_protocol_intersection.types.ProtocolIntersection\":", "= typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr) intersection_type_info_wrapper.type_info.mro.insert(0, typ.type) intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod def _is_intersection(typ: mypy.types.Type) ->", "[]) for ti in type_info_wrapper.base_classes] return mypy.types.Instance(type_info_wrapper.type_info, args=args) def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper:", "Protocols can be used in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo(", "is not None else mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\", ) type_info.mro = [type_info] type_info.is_protocol =", "intersection_type_info_wrapper.type_info.mro.insert(0, typ.type) intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]: return isinstance(typ, mypy.types.Instance)", "with another instance of this type.\"\"\" def __eq__(self, x: object) -> bool: if", "= ProtocolIntersectionResolver() signature = context.default_signature signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t", "str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type: args = tuple(context.api.analyze_type(arg_t) for", "type_info = mypy.nodes.TypeInfo( names=symbol_table if symbol_table is not None else mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\",", ">= (3, 10): # pragma: no cover from typing import TypeGuard else: #", "here at the last index ('we skip \"object\" since everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"),", "intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper: intersections_to_process = deque([type_]) while intersections_to_process: intersection = intersections_to_process.popleft() for", "code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table ) return mypy.types.Instance(type_info, args,", "( \"typing_protocol_intersection.types.ProtocolIntersection\" ) def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver() signature =", "typing.Type[mypy.plugin.Plugin]: # ignore version argument if the plugin works with all mypy versions.", "def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type: args =", "== ( \"typing_protocol_intersection.types.ProtocolIntersection\" ) def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver() signature", "Optional[mypy.nodes.SymbolTable] = None, 
) -> mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"),", "name=name, defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy expects object to be here at the", "class TypeInfoWrapper(typing.NamedTuple): type_info: mypy.nodes.TypeInfo base_classes: typing.List[mypy.nodes.TypeInfo] class IncomparableTypeName(str): \"\"\"A string that never returns", "-> mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver() signature = context.default_signature signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types =", "mypy.errorcodes import mypy.errors import mypy.nodes import mypy.options import mypy.plugin import mypy.types if sys.version_info", "def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper: intersections_to_process = deque([type_]) while intersections_to_process:", "be used in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname),", "10): # pragma: no cover from typing import TypeGuard else: # pragma: no", "ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table ) return", "pragma: no cover from typing import TypeGuard else: # pragma: no cover from", "type_analyze_hook(fullname) return None def get_method_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return", "return False return super().__eq__(x) def __hash__(self) -> int: # pylint: disable=useless-super-delegation return super().__hash__()", "for t in signature.arg_types] return signature def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def", "= mypy.nodes.TypeInfo( names=symbol_table if symbol_table is not None else mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\", )", "arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table ) return mypy.types.Instance(type_info,", "typ.type) @staticmethod def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]: return isinstance(typ, mypy.types.Instance) and typ.type.fullname ==", "folded_type.args = tuple(self.fold_intersection(t) for t in folded_type.args) return folded_type def fold_intersection(self, type_: mypy.types.Type)", "intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr) intersection_type_info_wrapper.type_info.mro.insert(0, typ.type) intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]: return", "intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]: return isinstance(typ, mypy.types.Instance) and typ.type.fullname", "mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint: disable=unused-argument def get_type_analyze_hook( self, 
fullname: str ) ->", "= mk_protocol_intersection_typeinfo( \"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), ) type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info, [])) args = [mypy.types.Instance(ti,", "[])) args = [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes] return mypy.types.Instance(type_info_wrapper.type_info, args=args) def", "if self._is_intersection(arg): intersections_to_process.append(arg) continue if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg) return intersection_type_info_wrapper @staticmethod def", "and typ.type.fullname == ( \"typing_protocol_intersection.types.ProtocolIntersection\" ) def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike: resolver =", "mypy.nodes import mypy.options import mypy.plugin import mypy.types if sys.version_info >= (3, 10): #", "self, fullname: str ) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]: if fullname == \"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname)", "index ('we skip \"object\" since everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], ) defn.fullname", "ProtocolIntersectionResolver: def fold_intersection_and_its_args(self, type_: mypy.types.Type) -> mypy.types.Type: folded_type = self.fold_intersection(type_) if isinstance(folded_type, mypy.types.Instance):", "tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args) symbol_table = mypy.nodes.SymbolTable() for arg in args: if", "defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy expects object to be here at the last", "same type, but just as protocols, # their fullnames need to differ -", "= [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types] return signature def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext],", "of this type.\"\"\" def __eq__(self, x: object) -> bool: if isinstance(x, IncomparableTypeName): return", "= context.default_signature signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types] return", "the last index ('we skip \"object\" since everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[],", "True type_info = mypy.nodes.TypeInfo( names=symbol_table if symbol_table is not None else mypy.nodes.SymbolTable(), defn=defn,", "base_classes: typing.List[mypy.nodes.TypeInfo] class IncomparableTypeName(str): \"\"\"A string that never returns True when compared (equality)", "fullname == \"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname) return None def get_method_signature_hook( self, fullname: str )", "IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] = None, ) -> mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]),", "symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table ) return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column)", "compared (equality) with another instance of this type.\"\"\" def __eq__(self, x: object) ->", "protocols, # their fullnames need to 
differ - that's it's an IncomparableTypeName. fullname:", ") -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]: if fullname == \"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname) return None def", "\"object\" since everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], ) defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol", "if not self._is_intersection(type_): return type_ type_info = mk_protocol_intersection_typeinfo( \"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), ) type_info_wrapper =", "ignore version argument if the plugin works with all mypy versions. return ProtocolIntersectionPlugin", "context.default_signature signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types] return signature", "TypeInfoWrapper: intersections_to_process = deque([type_]) while intersections_to_process: intersection = intersections_to_process.popleft() for arg in intersection.args:", "-> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook def get_function_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.FunctionSigContext],", "mypy.types if sys.version_info >= (3, 10): # pragma: no cover from typing import", "import TypeGuard SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint: disable=unused-argument def get_type_analyze_hook(", "str) -> typing.Type[mypy.plugin.Plugin]: # ignore version argument if the plugin works with all", "not self._is_intersection(type_): return type_ type_info = mk_protocol_intersection_typeinfo( \"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), ) type_info_wrapper = self._run_fold(type_,", "in args: if isinstance(arg, mypy.types.Instance): if not arg.type.is_protocol: context.api.fail( \"Only Protocols can be", "[resolver.fold_intersection_and_its_args(t) for t in signature.arg_types] return signature def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]:", "= mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy expects object to be here", "mypy.types.Type) -> TypeGuard[mypy.types.Instance]: return isinstance(typ, mypy.types.Instance) and typ.type.fullname == ( \"typing_protocol_intersection.types.ProtocolIntersection\" ) def", "expects object to be here at the last index ('we skip \"object\" since", "mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type: args = tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args) symbol_table = mypy.nodes.SymbolTable()", "object to be here at the last index ('we skip \"object\" since everyone", "ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint: disable=unused-argument def get_type_analyze_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]:", "resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types] return signature def type_analyze_hook(fullname: str)", "for arg in args: if isinstance(arg, 
mypy.types.Instance): if not arg.type.is_protocol: context.api.fail( \"Only Protocols", "TypeInfoWrapper(type_info, [])) args = [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes] return mypy.types.Instance(type_info_wrapper.type_info, args=args)", "= None, ) -> mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), #", "mypy.types.Type) -> mypy.types.Type: folded_type = self.fold_intersection(type_) if isinstance(folded_type, mypy.types.Instance): folded_type.args = tuple(self.fold_intersection(t) for", "-> bool: if isinstance(x, IncomparableTypeName): return False return super().__eq__(x) def __hash__(self) -> int:", "str, *, # For ProtocolIntersections to not be treated as the same type,", "differ - that's it's an IncomparableTypeName. fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] = None, )", "Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook def get_function_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]:", "True return type_info class ProtocolIntersectionResolver: def fold_intersection_and_its_args(self, type_: mypy.types.Type) -> mypy.types.Type: folded_type =", "typ: mypy.types.Instance) -> None: name_expr = mypy.nodes.NameExpr(typ.type.name) name_expr.node = typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr) intersection_type_info_wrapper.type_info.mro.insert(0,", "used in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table", "(equality) with another instance of this type.\"\"\" def __eq__(self, x: object) -> bool:", "TypeInfoWrapper(typing.NamedTuple): type_info: mypy.nodes.TypeInfo base_classes: typing.List[mypy.nodes.TypeInfo] class IncomparableTypeName(str): \"\"\"A string that never returns True", "-> mypy.types.Type: args = tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args) symbol_table = mypy.nodes.SymbolTable() for", "name_expr.node = typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr) intersection_type_info_wrapper.type_info.mro.insert(0, typ.type) intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod def _is_intersection(typ: mypy.types.Type)", "arg.type.is_protocol: context.api.fail( \"Only Protocols can be used in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names)", "disable=useless-super-delegation return super().__hash__() def mk_protocol_intersection_typeinfo( name: str, *, # For ProtocolIntersections to not", "(3, 10): # pragma: no cover from typing import TypeGuard else: # pragma:", "the same type, but just as protocols, # their fullnames need to differ", "def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None: name_expr = mypy.nodes.NameExpr(typ.type.name) name_expr.node = typ.type", "if isinstance(arg, mypy.types.Instance): if not arg.type.is_protocol: context.api.fail( \"Only Protocols can be used in", "mypy.types.Instance) -> None: name_expr = mypy.nodes.NameExpr(typ.type.name) name_expr.node = typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, 
name_expr) intersection_type_info_wrapper.type_info.mro.insert(0, typ.type)", "Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type: args = tuple(context.api.analyze_type(arg_t) for arg_t in", "mypy.plugin import mypy.types if sys.version_info >= (3, 10): # pragma: no cover from", "IncomparableTypeName(str): \"\"\"A string that never returns True when compared (equality) with another instance", "not arg.type.is_protocol: context.api.fail( \"Only Protocols can be used in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE )", "IncomparableTypeName. fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] = None, ) -> mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef(", "intersections_to_process.append(arg) continue if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg) return intersection_type_info_wrapper @staticmethod def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper,", "return _type_analyze_hook def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]: # ignore version argument if the", "_add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None: name_expr = mypy.nodes.NameExpr(typ.type.name) name_expr.node = typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0,", "-> typing.Type[mypy.plugin.Plugin]: # ignore version argument if the plugin works with all mypy", "class ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint: disable=unused-argument def get_type_analyze_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext],", "= resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types] return signature def type_analyze_hook(fullname:", "return intersection_function_signature_hook def get_function_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook", "no cover from typing_extensions import TypeGuard SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin): #", "no cover from typing import TypeGuard else: # pragma: no cover from typing_extensions", "since everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], ) defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol =", "typing from collections import deque from typing import Callable, Optional import mypy.errorcodes import", "return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column) return _type_analyze_hook def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]: #", ") -> mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy expects", "else mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\", ) type_info.mro = [type_info] type_info.is_protocol = True return type_info", "but just as protocols, # their fullnames need to differ - that's it's", "self._run_fold(type_, TypeInfoWrapper(type_info, [])) args = [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes] return 
mypy.types.Instance(type_info_wrapper.type_info,", "= typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint: disable=unused-argument def get_type_analyze_hook( self, fullname: str", "be here at the last index ('we skip \"object\" since everyone implements it')", "import mypy.plugin import mypy.types if sys.version_info >= (3, 10): # pragma: no cover", "def get_method_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook def get_function_signature_hook(", "last index ('we skip \"object\" since everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], )", "# their fullnames need to differ - that's it's an IncomparableTypeName. fullname: IncomparableTypeName,", "-> mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy expects object", "intersection_function_signature_hook def get_function_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook class", "name: str, *, # For ProtocolIntersections to not be treated as the same", "when compared (equality) with another instance of this type.\"\"\" def __eq__(self, x: object)", "arg in args: if isinstance(arg, mypy.types.Instance): if not arg.type.is_protocol: context.api.fail( \"Only Protocols can", "TypeInfoWrapper, typ: mypy.types.Instance) -> None: name_expr = mypy.nodes.NameExpr(typ.type.name) name_expr.node = typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr)", "need to differ - that's it's an IncomparableTypeName. 
fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] =", "mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy expects object to be here at", "intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver() signature = context.default_signature signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type)", "folded_type def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type: if not self._is_intersection(type_): return type_ type_info", "type_info.mro = [type_info] type_info.is_protocol = True return type_info class ProtocolIntersectionResolver: def fold_intersection_and_its_args(self, type_:", "IncomparableTypeName): return False return super().__eq__(x) def __hash__(self) -> int: # pylint: disable=useless-super-delegation return", "return None def get_method_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook", "super().__eq__(x) def __hash__(self) -> int: # pylint: disable=useless-super-delegation return super().__hash__() def mk_protocol_intersection_typeinfo( name:", "as the same type, but just as protocols, # their fullnames need to", "typing import Callable, Optional import mypy.errorcodes import mypy.errors import mypy.nodes import mypy.options import", "arg in intersection.args: if self._is_intersection(arg): intersections_to_process.append(arg) continue if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg) return", "deque([type_]) while intersections_to_process: intersection = intersections_to_process.popleft() for arg in intersection.args: if self._is_intersection(arg): intersections_to_process.append(arg)", "def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]: return isinstance(typ, mypy.types.Instance) and typ.type.fullname == ( \"typing_protocol_intersection.types.ProtocolIntersection\"", "signature.arg_types] return signature def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) ->", "symbol_table: Optional[mypy.nodes.SymbolTable] = None, ) -> mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]), base_type_exprs=[", "mypy.types.Type) -> mypy.types.Type: if not self._is_intersection(type_): return type_ type_info = mk_protocol_intersection_typeinfo( \"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"),", "import mypy.types if sys.version_info >= (3, 10): # pragma: no cover from typing", "name_expr = mypy.nodes.NameExpr(typ.type.name) name_expr.node = typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr) intersection_type_info_wrapper.type_info.mro.insert(0, typ.type) intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod", "int: # pylint: disable=useless-super-delegation return super().__hash__() def mk_protocol_intersection_typeinfo( name: str, *, # For", "-> mypy.types.Type: folded_type = self.fold_intersection(type_) if isinstance(folded_type, mypy.types.Instance): folded_type.args = tuple(self.fold_intersection(t) for t", "# For ProtocolIntersections to not be treated as the same type, but just", "intersection = 
intersections_to_process.popleft() for arg in intersection.args: if self._is_intersection(arg): intersections_to_process.append(arg) continue if isinstance(arg,", "def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]: # ignore version argument if the plugin works", "typ.type) intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]: return isinstance(typ, mypy.types.Instance) and", ") type_info.mro = [type_info] type_info.is_protocol = True return type_info class ProtocolIntersectionResolver: def fold_intersection_and_its_args(self,", "return type_info class ProtocolIntersectionResolver: def fold_intersection_and_its_args(self, type_: mypy.types.Type) -> mypy.types.Type: folded_type = self.fold_intersection(type_)", "to be here at the last index ('we skip \"object\" since everyone implements", "Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook class TypeInfoWrapper(typing.NamedTuple): type_info: mypy.nodes.TypeInfo base_classes: typing.List[mypy.nodes.TypeInfo] class IncomparableTypeName(str): \"\"\"A", "super().__hash__() def mk_protocol_intersection_typeinfo( name: str, *, # For ProtocolIntersections to not be treated", "if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg) return intersection_type_info_wrapper @staticmethod def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance)", "skip \"object\" since everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], ) defn.fullname = IncomparableTypeName(fullname)", "\"\"\"A string that never returns True when compared (equality) with another instance of", "as protocols, # their fullnames need to differ - that's it's an IncomparableTypeName.", "type_ type_info = mk_protocol_intersection_typeinfo( \"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), ) type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info, [])) args", "symbol_table is not None else mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\", ) type_info.mro = [type_info] type_info.is_protocol", "mypy.types.FunctionLike]]: return intersection_function_signature_hook def get_function_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]: return", "], type_vars=[], ) defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol = True type_info = mypy.nodes.TypeInfo( names=symbol_table", ") -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook class TypeInfoWrapper(typing.NamedTuple): type_info: mypy.nodes.TypeInfo base_classes: typing.List[mypy.nodes.TypeInfo] class", "be treated as the same type, but just as protocols, # their fullnames", "for t in folded_type.args) return folded_type def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type: if", "type_info: mypy.nodes.TypeInfo base_classes: typing.List[mypy.nodes.TypeInfo] class IncomparableTypeName(str): \"\"\"A string that never returns True when", "tuple(self.fold_intersection(t) for t in folded_type.args) return folded_type def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type:", "return isinstance(typ, mypy.types.Instance) and typ.type.fullname == ( 
\"typing_protocol_intersection.types.ProtocolIntersection\" ) def intersection_function_signature_hook(context: SignatureContext) ->", "from typing import TypeGuard else: # pragma: no cover from typing_extensions import TypeGuard", "instance of this type.\"\"\" def __eq__(self, x: object) -> bool: if isinstance(x, IncomparableTypeName):", "Optional import mypy.errorcodes import mypy.errors import mypy.nodes import mypy.options import mypy.plugin import mypy.types", "for arg in intersection.args: if self._is_intersection(arg): intersections_to_process.append(arg) continue if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg)", "-> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type: args = tuple(context.api.analyze_type(arg_t) for arg_t", "-> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]: if fullname == \"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname) return None def get_method_signature_hook(", "it's an IncomparableTypeName. fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] = None, ) -> mypy.nodes.TypeInfo: defn", "it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], ) defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol = True type_info =", "\"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), ) type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info, [])) args = [mypy.types.Instance(ti, []) for", "context.type.args) symbol_table = mypy.nodes.SymbolTable() for arg in args: if isinstance(arg, mypy.types.Instance): if not", "symbol_table = mypy.nodes.SymbolTable() for arg in args: if isinstance(arg, mypy.types.Instance): if not arg.type.is_protocol:", "get_function_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook class TypeInfoWrapper(typing.NamedTuple): type_info:", "column=context.type.column) return _type_analyze_hook def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]: # ignore version argument if", "their fullnames need to differ - that's it's an IncomparableTypeName. 
fullname: IncomparableTypeName, symbol_table:", "type_info = mk_protocol_intersection_typeinfo( \"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), ) type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info, [])) args =", "import deque from typing import Callable, Optional import mypy.errorcodes import mypy.errors import mypy.nodes", "= mypy.nodes.NameExpr(typ.type.name) name_expr.node = typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr) intersection_type_info_wrapper.type_info.mro.insert(0, typ.type) intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod def", "mypy.types.Type: folded_type = self.fold_intersection(type_) if isinstance(folded_type, mypy.types.Instance): folded_type.args = tuple(self.fold_intersection(t) for t in", "in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table )", "class IncomparableTypeName(str): \"\"\"A string that never returns True when compared (equality) with another", "\"typing_protocol_intersection.types.ProtocolIntersection\" ) def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver() signature = context.default_signature", "isinstance(arg, mypy.types.Instance): if not arg.type.is_protocol: context.api.fail( \"Only Protocols can be used in ProtocolIntersection.\",", "in folded_type.args) return folded_type def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type: if not self._is_intersection(type_):", "= self._run_fold(type_, TypeInfoWrapper(type_info, [])) args = [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes] return", "\"Only Protocols can be used in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info =", ") def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver() signature = context.default_signature signature.ret_type", "typing_extensions import TypeGuard SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint: disable=unused-argument def", "t in signature.arg_types] return signature def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context:", "pylint: disable=unused-argument def get_type_analyze_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]: if fullname", "return super().__eq__(x) def __hash__(self) -> int: # pylint: disable=useless-super-delegation return super().__hash__() def mk_protocol_intersection_typeinfo(", "TypeGuard SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint: disable=unused-argument def get_type_analyze_hook( self,", "mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table ) return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column) return _type_analyze_hook def", "-> None: name_expr = mypy.nodes.NameExpr(typ.type.name) name_expr.node = 
typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr) intersection_type_info_wrapper.type_info.mro.insert(0, typ.type) intersection_type_info_wrapper.base_classes.insert(0,", "type_: mypy.types.Type) -> mypy.types.Type: if not self._is_intersection(type_): return type_ type_info = mk_protocol_intersection_typeinfo( \"ProtocolIntersection\",", "mypy.types.Instance(type_info_wrapper.type_info, args=args) def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper: intersections_to_process = deque([type_])", "mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\", ) type_info.mro = [type_info] type_info.is_protocol = True return type_info class", "('we skip \"object\" since everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], ) defn.fullname =", "context.api.fail( \"Only Protocols can be used in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info", "if not arg.type.is_protocol: context.api.fail( \"Only Protocols can be used in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE", "names=symbol_table if symbol_table is not None else mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\", ) type_info.mro =", "typ.type intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr) intersection_type_info_wrapper.type_info.mro.insert(0, typ.type) intersection_type_info_wrapper.base_classes.insert(0, typ.type) @staticmethod def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]:", "fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type: if not self._is_intersection(type_): return type_ type_info = mk_protocol_intersection_typeinfo(", "type_info = mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table ) return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column) return", "mypy.nodes.SymbolTable() for arg in args: if isinstance(arg, mypy.types.Instance): if not arg.type.is_protocol: context.api.fail( \"Only", "mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy expects object to be here at the last index ('we", "that never returns True when compared (equality) with another instance of this type.\"\"\"", "from typing_extensions import TypeGuard SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint: disable=unused-argument", "IncomparableTypeName(fullname) defn.info.is_protocol = True type_info = mypy.nodes.TypeInfo( names=symbol_table if symbol_table is not None", "= [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes] return mypy.types.Instance(type_info_wrapper.type_info, args=args) def _run_fold(self, type_:", "Callable, Optional import mypy.errorcodes import mypy.errors import mypy.nodes import mypy.options import mypy.plugin import", "type_vars=[], ) defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol = True type_info = mypy.nodes.TypeInfo( names=symbol_table if", "cover from typing import TypeGuard else: # pragma: no cover from typing_extensions import", "return signature def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type:", 
"mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type: args = tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args)", "args = [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes] return mypy.types.Instance(type_info_wrapper.type_info, args=args) def _run_fold(self,", "[mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes] return mypy.types.Instance(type_info_wrapper.type_info, args=args) def _run_fold(self, type_: mypy.types.Instance,", "@staticmethod def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]: return isinstance(typ, mypy.types.Instance) and typ.type.fullname == (", "an IncomparableTypeName. fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] = None, ) -> mypy.nodes.TypeInfo: defn =", "self, fullname: str ) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook def get_function_signature_hook( self, fullname:", "that's it's an IncomparableTypeName. fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] = None, ) -> mypy.nodes.TypeInfo:", "deque from typing import Callable, Optional import mypy.errorcodes import mypy.errors import mypy.nodes import", "= tuple(self.fold_intersection(t) for t in folded_type.args) return folded_type def fold_intersection(self, type_: mypy.types.Type) ->", "self._add_type_to_intersection(intersection_type_info_wrapper, arg) return intersection_type_info_wrapper @staticmethod def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None: name_expr", "object) -> bool: if isinstance(x, IncomparableTypeName): return False return super().__eq__(x) def __hash__(self) ->", "def __hash__(self) -> int: # pylint: disable=useless-super-delegation return super().__hash__() def mk_protocol_intersection_typeinfo( name: str,", "args: if isinstance(arg, mypy.types.Instance): if not arg.type.is_protocol: context.api.fail( \"Only Protocols can be used", "plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]: # ignore version argument if the plugin works with", "class ProtocolIntersectionResolver: def fold_intersection_and_its_args(self, type_: mypy.types.Type) -> mypy.types.Type: folded_type = self.fold_intersection(type_) if isinstance(folded_type,", "self.fold_intersection(type_) if isinstance(folded_type, mypy.types.Instance): folded_type.args = tuple(self.fold_intersection(t) for t in folded_type.args) return folded_type", "mypy.nodes.TypeInfo( names=symbol_table if symbol_table is not None else mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\", ) type_info.mro", "= [type_info] type_info.is_protocol = True return type_info class ProtocolIntersectionResolver: def fold_intersection_and_its_args(self, type_: mypy.types.Type)", "from typing import Callable, Optional import mypy.errorcodes import mypy.errors import mypy.nodes import mypy.options", "return intersection_type_info_wrapper @staticmethod def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None: name_expr = mypy.nodes.NameExpr(typ.type.name)", "mypy.types.FunctionLike]]: return intersection_function_signature_hook class TypeInfoWrapper(typing.NamedTuple): type_info: mypy.nodes.TypeInfo base_classes: typing.List[mypy.nodes.TypeInfo] class IncomparableTypeName(str): \"\"\"A string", "signature.arg_types = 
[resolver.fold_intersection_and_its_args(t) for t in signature.arg_types] return signature def type_analyze_hook(fullname: str) ->", "= mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table ) return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column) return _type_analyze_hook", "string that never returns True when compared (equality) with another instance of this", "# pragma: no cover from typing import TypeGuard else: # pragma: no cover", "never returns True when compared (equality) with another instance of this type.\"\"\" def", "another instance of this type.\"\"\" def __eq__(self, x: object) -> bool: if isinstance(x,", "return mypy.types.Instance(type_info_wrapper.type_info, args=args) def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper: intersections_to_process =", "intersection_type_info_wrapper @staticmethod def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None: name_expr = mypy.nodes.NameExpr(typ.type.name) name_expr.node", "mk_protocol_intersection_typeinfo( name: str, *, # For ProtocolIntersections to not be treated as the", "self._is_intersection(type_): return type_ type_info = mk_protocol_intersection_typeinfo( \"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), ) type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info,", "not None else mypy.nodes.SymbolTable(), defn=defn, module_name=\"typing_protocol_intersection\", ) type_info.mro = [type_info] type_info.is_protocol = True", ") return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column) return _type_analyze_hook def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]:", "TypeGuard else: # pragma: no cover from typing_extensions import TypeGuard SignatureContext = typing.Union[mypy.plugin.FunctionSigContext,", "isinstance(x, IncomparableTypeName): return False return super().__eq__(x) def __hash__(self) -> int: # pylint: disable=useless-super-delegation", "fullnames need to differ - that's it's an IncomparableTypeName. 
fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable]", "None def get_method_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook def", ") symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo( context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table ) return mypy.types.Instance(type_info, args, line=context.type.line,", "can be used in ProtocolIntersection.\", arg, code=mypy.errorcodes.VALID_TYPE ) symbol_table.update(arg.type.names) type_info = mk_protocol_intersection_typeinfo( context.type.name,", "*, # For ProtocolIntersections to not be treated as the same type, but", "mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy expects object to", "# pragma: no cover from typing_extensions import TypeGuard SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class", "sys.version_info >= (3, 10): # pragma: no cover from typing import TypeGuard else:", "TypeGuard[mypy.types.Instance]: return isinstance(typ, mypy.types.Instance) and typ.type.fullname == ( \"typing_protocol_intersection.types.ProtocolIntersection\" ) def intersection_function_signature_hook(context: SignatureContext)", "for ti in type_info_wrapper.base_classes] return mypy.types.Instance(type_info_wrapper.type_info, args=args) def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper)", "while intersections_to_process: intersection = intersections_to_process.popleft() for arg in intersection.args: if self._is_intersection(arg): intersections_to_process.append(arg) continue", "For ProtocolIntersections to not be treated as the same type, but just as", "None, ) -> mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef( name=name, defs=mypy.nodes.Block([]), base_type_exprs=[ mypy.nodes.NameExpr(\"typing.Protocol\"), # mypy", "== \"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname) return None def get_method_signature_hook( self, fullname: str ) ->", "ProtocolIntersectionResolver() signature = context.default_signature signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in", "str ) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook def get_function_signature_hook( self, fullname: str )", "typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint: disable=unused-argument def get_type_analyze_hook( self, fullname: str )", "defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol = True type_info = mypy.nodes.TypeInfo( names=symbol_table if symbol_table is", "in intersection.args: if self._is_intersection(arg): intersections_to_process.append(arg) continue if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg) return intersection_type_info_wrapper", "# pylint: disable=useless-super-delegation return super().__hash__() def mk_protocol_intersection_typeinfo( name: str, *, # For ProtocolIntersections", "= True return type_info class ProtocolIntersectionResolver: def fold_intersection_and_its_args(self, type_: 
mypy.types.Type) -> mypy.types.Type: folded_type", "str ) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]: if fullname == \"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname) return None", "__hash__(self) -> int: # pylint: disable=useless-super-delegation return super().__hash__() def mk_protocol_intersection_typeinfo( name: str, *,", "SignatureContext) -> mypy.types.FunctionLike: resolver = ProtocolIntersectionResolver() signature = context.default_signature signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type) signature.arg_types", "not be treated as the same type, but just as protocols, # their", "args, line=context.type.line, column=context.type.column) return _type_analyze_hook def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]: # ignore version", "this type.\"\"\" def __eq__(self, x: object) -> bool: if isinstance(x, IncomparableTypeName): return False", "-> mypy.types.Type: if not self._is_intersection(type_): return type_ type_info = mk_protocol_intersection_typeinfo( \"ProtocolIntersection\", fullname=IncomparableTypeName(\"typing_protocol_intersection.types.ProtocolIntersection\"), )", "bool: if isinstance(x, IncomparableTypeName): return False return super().__eq__(x) def __hash__(self) -> int: #", "\"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname) return None def get_method_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.MethodSigContext],", "typing import TypeGuard else: # pragma: no cover from typing_extensions import TypeGuard SignatureContext", "get_method_signature_hook( self, fullname: str ) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook def get_function_signature_hook( self,", "at the last index ('we skip \"object\" since everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"), ],", "return folded_type def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type: if not self._is_intersection(type_): return type_", "pylint: disable=useless-super-delegation return super().__hash__() def mk_protocol_intersection_typeinfo( name: str, *, # For ProtocolIntersections to", "line=context.type.line, column=context.type.column) return _type_analyze_hook def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]: # ignore version argument", "if fullname == \"typing_protocol_intersection.types.ProtocolIntersection\": return type_analyze_hook(fullname) return None def get_method_signature_hook( self, fullname: str", "just as protocols, # their fullnames need to differ - that's it's an", "to not be treated as the same type, but just as protocols, #", "def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type: if not self._is_intersection(type_): return type_ type_info =", "mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper: intersections_to_process = deque([type_]) while intersections_to_process: intersection = intersections_to_process.popleft()", "intersection_function_signature_hook class TypeInfoWrapper(typing.NamedTuple): type_info: mypy.nodes.TypeInfo base_classes: typing.List[mypy.nodes.TypeInfo] class IncomparableTypeName(str): \"\"\"A string that never", "= IncomparableTypeName(fullname) defn.info.is_protocol = True type_info = mypy.nodes.TypeInfo( names=symbol_table if symbol_table is not", "cover from typing_extensions import TypeGuard SignatureContext = 
typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext] class ProtocolIntersectionPlugin(mypy.plugin.Plugin): # pylint:", "mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg) return intersection_type_info_wrapper @staticmethod def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None:", "if isinstance(folded_type, mypy.types.Instance): folded_type.args = tuple(self.fold_intersection(t) for t in folded_type.args) return folded_type def", "if sys.version_info >= (3, 10): # pragma: no cover from typing import TypeGuard", "intersection.args: if self._is_intersection(arg): intersections_to_process.append(arg) continue if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg) return intersection_type_info_wrapper @staticmethod", "mypy.types.Type: args = tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args) symbol_table = mypy.nodes.SymbolTable() for arg", "self._is_intersection(arg): intersections_to_process.append(arg) continue if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg) return intersection_type_info_wrapper @staticmethod def _add_type_to_intersection(intersection_type_info_wrapper:", "fold_intersection_and_its_args(self, type_: mypy.types.Type) -> mypy.types.Type: folded_type = self.fold_intersection(type_) if isinstance(folded_type, mypy.types.Instance): folded_type.args =", "- that's it's an IncomparableTypeName. fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] = None, ) ->", "fullname=IncomparableTypeName(fullname), symbol_table=symbol_table ) return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column) return _type_analyze_hook def plugin(_version: str)", "fullname: str ) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]: return intersection_function_signature_hook class TypeInfoWrapper(typing.NamedTuple): type_info: mypy.nodes.TypeInfo base_classes:", "everyone implements it') mypy.nodes.NameExpr(\"builtins.object\"), ], type_vars=[], ) defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol = True", ") defn.fullname = IncomparableTypeName(fullname) defn.info.is_protocol = True type_info = mypy.nodes.TypeInfo( names=symbol_table if symbol_table", "in type_info_wrapper.base_classes] return mypy.types.Instance(type_info_wrapper.type_info, args=args) def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper:", "fullname: IncomparableTypeName, symbol_table: Optional[mypy.nodes.SymbolTable] = None, ) -> mypy.nodes.TypeInfo: defn = mypy.nodes.ClassDef( name=name,", "continue if isinstance(arg, mypy.types.Instance): self._add_type_to_intersection(intersection_type_info_wrapper, arg) return intersection_type_info_wrapper @staticmethod def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ:", "isinstance(typ, mypy.types.Instance) and typ.type.fullname == ( \"typing_protocol_intersection.types.ProtocolIntersection\" ) def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike:", "signature def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]: def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type: args", "collections import deque from typing import Callable, 
# === ngram row: mypy plugin for typing_protocol_intersection (overlapping shingles reassembled; inferred spots are marked in comments) ===
import sys
import typing
from collections import deque
from typing import Callable, Optional

import mypy.errorcodes
import mypy.errors
import mypy.nodes
import mypy.options
import mypy.plugin
import mypy.types

if sys.version_info >= (3, 10):
    from typing import TypeGuard
else:  # pragma: no cover
    from typing_extensions import TypeGuard

SignatureContext = typing.Union[mypy.plugin.FunctionSigContext, mypy.plugin.MethodSigContext]


class ProtocolIntersectionPlugin(mypy.plugin.Plugin):
    # pylint: disable=unused-argument
    def get_type_analyze_hook(
        self, fullname: str
    ) -> Optional[Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]]:
        if fullname == "typing_protocol_intersection.types.ProtocolIntersection":
            return type_analyze_hook(fullname)
        return None

    def get_method_signature_hook(
        self, fullname: str
    ) -> Optional[Callable[[mypy.plugin.MethodSigContext], mypy.types.FunctionLike]]:
        return intersection_function_signature_hook

    def get_function_signature_hook(
        self, fullname: str
    ) -> Optional[Callable[[mypy.plugin.FunctionSigContext], mypy.types.FunctionLike]]:
        return intersection_function_signature_hook


class TypeInfoWrapper(typing.NamedTuple):
    type_info: mypy.nodes.TypeInfo
    base_classes: typing.List[mypy.nodes.TypeInfo]


class IncomparableTypeName(str):
    """A string that never returns True when compared (equality) with another IncomparableTypeName."""

    def __eq__(self, x: object) -> bool:
        if isinstance(x, IncomparableTypeName):
            return False
        return super().__eq__(x)

    def __hash__(self) -> int:
        return super().__hash__()


def mk_protocol_intersection_typeinfo(
    name: str,
    *,
    # For ProtocolIntersections to not be treated as the same type.
    fullname: str,
    symbol_table: Optional[mypy.nodes.SymbolTable] = None,
) -> mypy.nodes.TypeInfo:
    defn = mypy.nodes.ClassDef(
        name=name,
        defs=mypy.nodes.Block([]),
        base_type_exprs=[
            mypy.nodes.NameExpr("typing.Protocol"),
            # mypy expects object to be here at the last index
        ],
    )
    defn.fullname = fullname  # inferred: the fullname argument has to land on the defn
    type_info = mypy.nodes.TypeInfo(
        names=symbol_table if symbol_table is not None else mypy.nodes.SymbolTable(),
        defn=defn,
        module_name="typing_protocol_intersection",
    )
    type_info.mro = [type_info]
    type_info.is_protocol = True
    return type_info


class ProtocolIntersectionResolver:
    def fold_intersection_and_its_args(self, type_: mypy.types.Type) -> mypy.types.Type:
        folded_type = self.fold_intersection(type_)
        if isinstance(folded_type, mypy.types.Instance):
            folded_type.args = tuple(self.fold_intersection(t) for t in folded_type.args)
        return folded_type

    def fold_intersection(self, type_: mypy.types.Type) -> mypy.types.Type:
        if not self._is_intersection(type_):
            return type_
        type_info = mk_protocol_intersection_typeinfo(
            "ProtocolIntersection",
            fullname=IncomparableTypeName("typing_protocol_intersection.types.ProtocolIntersection"),
        )
        type_info_wrapper = self._run_fold(type_, TypeInfoWrapper(type_info, []))
        args = [mypy.types.Instance(ti, []) for ti in type_info_wrapper.base_classes]
        return mypy.types.Instance(type_info_wrapper.type_info, args=args)

    def _run_fold(self, type_: mypy.types.Instance, intersection_type_info_wrapper: TypeInfoWrapper) -> TypeInfoWrapper:
        intersections_to_process = deque([type_])
        while intersections_to_process:
            intersection = intersections_to_process.popleft()
            for arg in intersection.args:
                if self._is_intersection(arg):
                    intersections_to_process.append(arg)
                    continue
                if isinstance(arg, mypy.types.Instance):
                    self._add_type_to_intersection(intersection_type_info_wrapper, arg)
        return intersection_type_info_wrapper

    @staticmethod
    def _add_type_to_intersection(intersection_type_info_wrapper: TypeInfoWrapper, typ: mypy.types.Instance) -> None:
        name_expr = mypy.nodes.NameExpr(typ.type.name)
        name_expr.node = typ.type
        intersection_type_info_wrapper.type_info.defn.base_type_exprs.insert(0, name_expr)
        intersection_type_info_wrapper.type_info.mro.insert(0, typ.type)
        intersection_type_info_wrapper.base_classes.insert(0, typ.type)

    @staticmethod
    def _is_intersection(typ: mypy.types.Type) -> TypeGuard[mypy.types.Instance]:
        return isinstance(typ, mypy.types.Instance) and typ.type.fullname == (
            "typing_protocol_intersection.types.ProtocolIntersection"
        )


def intersection_function_signature_hook(context: SignatureContext) -> mypy.types.FunctionLike:
    resolver = ProtocolIntersectionResolver()
    signature = context.default_signature
    signature.ret_type = resolver.fold_intersection_and_its_args(signature.ret_type)
    signature.arg_types = [resolver.fold_intersection_and_its_args(t) for t in signature.arg_types]
    return signature


def type_analyze_hook(fullname: str) -> Callable[[mypy.plugin.AnalyzeTypeContext], mypy.types.Type]:
    def _type_analyze_hook(context: mypy.plugin.AnalyzeTypeContext) -> mypy.types.Type:
        args = tuple(context.api.analyze_type(arg_t) for arg_t in context.type.args)
        symbol_table = mypy.nodes.SymbolTable()
        for arg in args:
            if isinstance(arg, mypy.types.Instance):
                if not arg.type.is_protocol:
                    context.api.fail(
                        "Only Protocols can be used in ProtocolIntersection.",
                        arg,
                        code=mypy.errorcodes.VALID_TYPE,  # inferred: the shingles cut off after `arg,`
                    )
        type_info = mk_protocol_intersection_typeinfo(
            context.type.name, fullname=IncomparableTypeName(fullname), symbol_table=symbol_table
        )
        return mypy.types.Instance(type_info, args, line=context.type.line, column=context.type.column)

    return _type_analyze_hook


def plugin(_version: str) -> typing.Type[mypy.plugin.Plugin]:
    # ignore version argument if the plugin works with all mypy versions.
    return ProtocolIntersectionPlugin
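For orientation, a hedged sketch of what the plugin resolves at a use site. HasA, HasB, and build() are invented for illustration; only the ProtocolIntersection import path comes from the module above, and the sketch assumes the plugin is registered with mypy via its plugins setting.

# Hypothetical usage sketch; the Protocol definitions are made up,
# only ProtocolIntersection itself comes from the package above.
from typing import Protocol

from typing_protocol_intersection.types import ProtocolIntersection


class HasA(Protocol):
    a: str


class HasB(Protocol):
    b: int


def build() -> ProtocolIntersection[HasA, HasB]: ...


def use() -> None:
    obj = build()
    # With the plugin active, mypy folds the intersection so both attributes type-check.
    print(obj.a, obj.b)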
[ "import migrations, models def update_names(apps, schema_editor): for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name =", "1.10.7 on 2017-07-18 04:43 from __future__ import unicode_literals from django.db import migrations, models", "'') x.save() for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name', '') x.save() class", "[ ('institutions', '0013_auto_20170718_0256'), ] operations = [ migrations.AddField( model_name='regon', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'),", "= [ migrations.AddField( model_name='regon', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.AddField( model_name='resp', name='name',", "django.db import migrations, models def update_names(apps, schema_editor): for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name", "x.save() for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name', '') x.save() class Migration(migrations.Migration):", "= [ ('institutions', '0013_auto_20170718_0256'), ] operations = [ migrations.AddField( model_name='regon', name='name', field=models.CharField(default='', max_length=200,", "for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name', '') x.save() class Migration(migrations.Migration): dependencies", "-*- # Generated by Django 1.10.7 on 2017-07-18 04:43 from __future__ import unicode_literals", "coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-07-18 04:43 from __future__", "x.save() class Migration(migrations.Migration): dependencies = [ ('institutions', '0013_auto_20170718_0256'), ] operations = [ migrations.AddField(", "Generated by Django 1.10.7 on 2017-07-18 04:43 from __future__ import unicode_literals from django.db", "from django.db import migrations, models def update_names(apps, schema_editor): for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator():", "'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa', '') x.save() for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name =", "'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name', '') x.save() class Migration(migrations.Migration): dependencies = [ ('institutions', '0013_auto_20170718_0256'),", "Migration(migrations.Migration): dependencies = [ ('institutions', '0013_auto_20170718_0256'), ] operations = [ migrations.AddField( model_name='regon', name='name',", "'') x.save() class Migration(migrations.Migration): dependencies = [ ('institutions', '0013_auto_20170718_0256'), ] operations = [", "dependencies = [ ('institutions', '0013_auto_20170718_0256'), ] operations = [ migrations.AddField( model_name='regon', name='name', field=models.CharField(default='',", "in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa', '') x.save() for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator():", "= x.data.get('name', '') x.save() class Migration(migrations.Migration): dependencies = [ ('institutions', '0013_auto_20170718_0256'), ] operations", "in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name', '') x.save() class Migration(migrations.Migration): dependencies = [", 
"unicode_literals from django.db import migrations, models def update_names(apps, schema_editor): for x in apps.get_model('institutions',", "__future__ import unicode_literals from django.db import migrations, models def update_names(apps, schema_editor): for x", "-*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-07-18 04:43 from", "def update_names(apps, schema_editor): for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa', '') x.save()", "utf-8 -*- # Generated by Django 1.10.7 on 2017-07-18 04:43 from __future__ import", "# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-07-18 04:43", "max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.AddField( model_name='resp', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.RunPython(update_names)", "field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.AddField( model_name='resp', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ),", "] operations = [ migrations.AddField( model_name='regon', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.AddField(", "# Generated by Django 1.10.7 on 2017-07-18 04:43 from __future__ import unicode_literals from", "import unicode_literals from django.db import migrations, models def update_names(apps, schema_editor): for x in", "name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.AddField( model_name='resp', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False,", "x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa', '') x.save() for x in apps.get_model('institutions',", "apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name', '') x.save() class Migration(migrations.Migration): dependencies = [ ('institutions',", "update_names(apps, schema_editor): for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa', '') x.save() for", "x.name = x.data.get('name', '') x.save() class Migration(migrations.Migration): dependencies = [ ('institutions', '0013_auto_20170718_0256'), ]", "operations = [ migrations.AddField( model_name='regon', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.AddField( model_name='resp',", "model_name='regon', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.AddField( model_name='resp', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'),", "verbose_name='Name'), preserve_default=False, ), migrations.AddField( model_name='resp', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.RunPython(update_names) ]", "by Django 1.10.7 on 2017-07-18 04:43 from __future__ import unicode_literals from django.db import", "Django 1.10.7 on 2017-07-18 04:43 from __future__ import unicode_literals from django.db import migrations,", "x.name = x.data.get('nazwa', '') x.save() for x in 
apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name',", "apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa', '') x.save() for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name", "2017-07-18 04:43 from __future__ import unicode_literals from django.db import migrations, models def update_names(apps,", "= x.data.get('nazwa', '') x.save() for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name', '')", "class Migration(migrations.Migration): dependencies = [ ('institutions', '0013_auto_20170718_0256'), ] operations = [ migrations.AddField( model_name='regon',", "for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa', '') x.save() for x in", "x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name', '') x.save() class Migration(migrations.Migration): dependencies =", "models def update_names(apps, schema_editor): for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa', '')", "on 2017-07-18 04:43 from __future__ import unicode_literals from django.db import migrations, models def", "('institutions', '0013_auto_20170718_0256'), ] operations = [ migrations.AddField( model_name='regon', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False,", "schema_editor): for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa', '') x.save() for x", "x.data.get('name', '') x.save() class Migration(migrations.Migration): dependencies = [ ('institutions', '0013_auto_20170718_0256'), ] operations =", "'0013_auto_20170718_0256'), ] operations = [ migrations.AddField( model_name='regon', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ),", "[ migrations.AddField( model_name='regon', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.AddField( model_name='resp', name='name', field=models.CharField(default='',", "migrations.AddField( model_name='regon', name='name', field=models.CharField(default='', max_length=200, verbose_name='Name'), preserve_default=False, ), migrations.AddField( model_name='resp', name='name', field=models.CharField(default='', max_length=200,", "04:43 from __future__ import unicode_literals from django.db import migrations, models def update_names(apps, schema_editor):", "migrations, models def update_names(apps, schema_editor): for x in apps.get_model('institutions', 'regon').objects.exclude(data=None).iterator(): x.name = x.data.get('nazwa',", "x.data.get('nazwa', '') x.save() for x in apps.get_model('institutions', 'resp').objects.exclude(data=None).iterator(): x.name = x.data.get('name', '') x.save()", "from __future__ import unicode_literals from django.db import migrations, models def update_names(apps, schema_editor): for" ]
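The RunPython step above registers only a forward function, so the migration cannot be unapplied past it. A minimal sketch of the reversible variant, assuming nothing needs restoring on rollback (the AddField reversals already drop the columns):

# Drop-in replacement for the final entry of `operations` above;
# RunPython.noop is Django's stock do-nothing reverse function.
migrations.RunPython(update_names, reverse_code=migrations.RunPython.noop),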
[ "% str(response['id']) usr = User.get_by('authIDs_', auth_id) bio = response['bio'][:user.bio_span[1]] if response['bio'] else ''", "Todo replace opaque and repeated code such as # bio = response['bio'][:UserVdr.bio_span[1]] if", "User.get_by('authIDs_', auth_id) bio = response['bio'][:user.bio_span[1]] if response['bio'] else '' location = response['location'][:user.location_span[1]] if", "if response is None: flask.flash('You denied the request to sign in.') return flask.redirect(flask.url_for('index'))", "consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, ) github = auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/') def github_authorized():", "= auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response = github.authorized_response() if response is None:", "replace opaque and repeated code such as # bio = response['bio'][:UserVdr.bio_span[1]] if response['bio']", "config from main import app import model.user as user #import User#, UserVdr github_config", "= 'github_%s' % str(response['id']) usr = User.get_by('authIDs_', auth_id) bio = response['bio'][:user.bio_span[1]] if response['bio']", "'') me = github.get('user') usr = retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token(): return", "'bio') def getField(response, name): field = response[name] if field: span = name +", "= (response['access_token'], '') me = github.get('user') usr = retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter def", "flask.session['oauth_token'] = (response['access_token'], '') me = github.get('user') usr = retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter", "= name + '_span' # depend on validators following this naming convention max", "''), response.get('login'), response.get('email', ''), location=location, bio=bio, github=response.get('login') ) # Todo replace opaque and", "@app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response = github.authorized_response() if response is None: flask.flash('You denied the", "= User.get_by('authIDs_', auth_id) bio = response['bio'][:user.bio_span[1]] if response['bio'] else '' location = response['location'][:user.location_span[1]]", "response.get('email', ''), location=location, bio=bio, github=response.get('login') ) # Todo replace opaque and repeated code", "flask import auth import config from main import app import model.user as user", "from main import app import model.user as user #import User#, UserVdr github_config =", "'' # with # bio = getField(response, 'bio') def getField(response, name): field =", "return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '') me = github.get('user') usr = retrieve_user_from_github(me.data) return", "= github.get('user') usr = retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/')", "# Todo replace opaque and repeated code such as # bio = response['bio'][:UserVdr.bio_span[1]]", "# consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, ) github = auth.create_oauth_app(github_config, 'github') 
@app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response", "User#, UserVdr github_config = dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret,", "def get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github(): return auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id =", "code such as # bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else '' # with", "or auth.create_or_get_user_db( auth_id, response.get('name', ''), response.get('login'), response.get('email', ''), location=location, bio=bio, github=response.get('login') ) #", "signin_github(): return auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id = 'github_%s' % str(response['id']) usr = User.get_by('authIDs_',", "retrieve_user_from_github(response): auth_id = 'github_%s' % str(response['id']) usr = User.get_by('authIDs_', auth_id) bio = response['bio'][:user.bio_span[1]]", "else '' # with # bio = getField(response, 'bio') def getField(response, name): field", "location=location, bio=bio, github=response.get('login') ) # Todo replace opaque and repeated code such as", "str(response['id']) usr = User.get_by('authIDs_', auth_id) bio = response['bio'][:user.bio_span[1]] if response['bio'] else '' location", "'github_%s' % str(response['id']) usr = User.get_by('authIDs_', auth_id) bio = response['bio'][:user.bio_span[1]] if response['bio'] else", "and repeated code such as # bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else ''", "response['bio'] else '' # with # bio = getField(response, 'bio') def getField(response, name):", "if response['bio'] else '' location = response['location'][:user.location_span[1]] if response['location'] else '' return usr", ") # Todo replace opaque and repeated code such as # bio =", "user #import User#, UserVdr github_config = dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id,", "usr or auth.create_or_get_user_db( auth_id, response.get('name', ''), response.get('login'), response.get('email', ''), location=location, bio=bio, github=response.get('login') )", "getField(response, 'bio') def getField(response, name): field = response[name] if field: span = name", "github.authorized_response() if response is None: flask.flash('You denied the request to sign in.') return", "on validators following this naming convention max = getattr(user, span)[1] return field [:max]", "return auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id = 'github_%s' % str(response['id']) usr = User.get_by('authIDs_', auth_id)", "field = response[name] if field: span = name + '_span' # depend on", "# coding: utf-8 # pylint: disable=missing-docstring, invalid-name import flask import auth import config", "github_config = dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, 
request_token_params={'scope': 'user:email'},", "None: flask.flash('You denied the request to sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'],", "= response['bio'][:UserVdr.bio_span[1]] if response['bio'] else '' # with # bio = getField(response, 'bio')", "response is None: flask.flash('You denied the request to sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token']", "as user #import User#, UserVdr github_config = dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', #", "= dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, )", "validators following this naming convention max = getattr(user, span)[1] return field [:max] return", "@github.tokengetter def get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github(): return auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id", "sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '') me = github.get('user') usr =", "import auth import config from main import app import model.user as user #import", "request_token_params={'scope': 'user:email'}, ) github = auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response = github.authorized_response()", "bio = response['bio'][:user.bio_span[1]] if response['bio'] else '' location = response['location'][:user.location_span[1]] if response['location'] else", "auth.create_or_get_user_db( auth_id, response.get('name', ''), response.get('login'), response.get('email', ''), location=location, bio=bio, github=response.get('login') ) # Todo", "flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '') me = github.get('user') usr = retrieve_user_from_github(me.data) return auth.signin_via_social(usr)", "main import app import model.user as user #import User#, UserVdr github_config = dict(", "consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, ) github = auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response =", "following this naming convention max = getattr(user, span)[1] return field [:max] return ''", "# depend on validators following this naming convention max = getattr(user, span)[1] return", "opaque and repeated code such as # bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else", "the request to sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '') me =", "usr = User.get_by('authIDs_', auth_id) bio = response['bio'][:user.bio_span[1]] if response['bio'] else '' location =", "bio = getField(response, 'bio') def getField(response, name): field = response[name] if field: span", "'' return usr or auth.create_or_get_user_db( auth_id, response.get('name', ''), response.get('login'), response.get('email', ''), location=location, bio=bio,", 
"base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, ) github = auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/')", "= response[name] if field: span = name + '_span' # depend on validators", "denied the request to sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '') me", "import config from main import app import model.user as user #import User#, UserVdr", "import flask import auth import config from main import app import model.user as", "to sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '') me = github.get('user') usr", "such as # bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else '' # with #", "get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github(): return auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id = 'github_%s'", "depend on validators following this naming convention max = getattr(user, span)[1] return field", "usr = retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github():", "import app import model.user as user #import User#, UserVdr github_config = dict( access_token_method='POST',", "bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else '' # with # bio = getField(response,", "if response['location'] else '' return usr or auth.create_or_get_user_db( auth_id, response.get('name', ''), response.get('login'), response.get('email',", "# with # bio = getField(response, 'bio') def getField(response, name): field = response[name]", "auth import config from main import app import model.user as user #import User#,", "name): field = response[name] if field: span = name + '_span' # depend", "'github') @app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response = github.authorized_response() if response is None: flask.flash('You denied", "github.get('user') usr = retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/') def", "= github.authorized_response() if response is None: flask.flash('You denied the request to sign in.')", "response['bio'] else '' location = response['location'][:user.location_span[1]] if response['location'] else '' return usr or", "#import User#, UserVdr github_config = dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, #", "''), location=location, bio=bio, github=response.get('login') ) # Todo replace opaque and repeated code such", "location = response['location'][:user.location_span[1]] if response['location'] else '' return usr or auth.create_or_get_user_db( auth_id, response.get('name',", "auth_id, response.get('name', ''), response.get('login'), response.get('email', ''), location=location, bio=bio, github=response.get('login') ) # Todo replace", "# bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else '' # with # bio =", 
"auth_id) bio = response['bio'][:user.bio_span[1]] if response['bio'] else '' location = response['location'][:user.location_span[1]] if response['location']", "me = github.get('user') usr = retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token(): return flask.session.get('oauth_token')", "getField(response, name): field = response[name] if field: span = name + '_span' #", "flask.flash('You denied the request to sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '')", "access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, ) github =", "UserVdr github_config = dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope':", "return auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github(): return auth.signin_oauth(github) def", "as # bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else '' # with # bio", "response.get('name', ''), response.get('login'), response.get('email', ''), location=location, bio=bio, github=response.get('login') ) # Todo replace opaque", "response['bio'][:UserVdr.bio_span[1]] if response['bio'] else '' # with # bio = getField(response, 'bio') def", "model.user as user #import User#, UserVdr github_config = dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/',", "= retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github(): return", "authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, ) github = auth.create_oauth_app(github_config, 'github')", "github_authorized(): response = github.authorized_response() if response is None: flask.flash('You denied the request to", "github=response.get('login') ) # Todo replace opaque and repeated code such as # bio", "'user:email'}, ) github = auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response = github.authorized_response() if", "invalid-name import flask import auth import config from main import app import model.user", "dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, ) github", "# bio = getField(response, 'bio') 
def getField(response, name): field = response[name] if field:", "# pylint: disable=missing-docstring, invalid-name import flask import auth import config from main import", "request to sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '') me = github.get('user')", "coding: utf-8 # pylint: disable=missing-docstring, invalid-name import flask import auth import config from", "def signin_github(): return auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id = 'github_%s' % str(response['id']) usr =", "app import model.user as user #import User#, UserVdr github_config = dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token',", "auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github(): return auth.signin_oauth(github) def retrieve_user_from_github(response):", "with # bio = getField(response, 'bio') def getField(response, name): field = response[name] if", "auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id = 'github_%s' % str(response['id']) usr = User.get_by('authIDs_', auth_id) bio", "response['location'] else '' return usr or auth.create_or_get_user_db( auth_id, response.get('name', ''), response.get('login'), response.get('email', ''),", "bio=bio, github=response.get('login') ) # Todo replace opaque and repeated code such as #", "response = github.authorized_response() if response is None: flask.flash('You denied the request to sign", "field: span = name + '_span' # depend on validators following this naming", "# consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, ) github = auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/') def", ") github = auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response = github.authorized_response() if response", "github = auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response = github.authorized_response() if response is", "import model.user as user #import User#, UserVdr github_config = dict( access_token_method='POST', access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize',", "@app.route('/signin/github/') def signin_github(): return auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id = 'github_%s' % str(response['id']) usr", "= getField(response, 'bio') def getField(response, name): field = response[name] if field: span =", "else '' location = response['location'][:user.location_span[1]] if response['location'] else '' return usr or auth.create_or_get_user_db(", "auth_id = 'github_%s' % str(response['id']) usr = User.get_by('authIDs_', auth_id) bio = response['bio'][:user.bio_span[1]] if", "+ '_span' # depend on validators following this naming convention max = getattr(user,", "if response['bio'] else '' # with # bio = getField(response, 'bio') def getField(response,", "if field: span = name + '_span' # depend on validators following this", "def github_authorized(): response = github.authorized_response() if response is None: flask.flash('You denied the request", "response['bio'][:user.bio_span[1]] if response['bio'] 
else '' location = response['location'][:user.location_span[1]] if response['location'] else '' return", "'' location = response['location'][:user.location_span[1]] if response['location'] else '' return usr or auth.create_or_get_user_db( auth_id,", "repeated code such as # bio = response['bio'][:UserVdr.bio_span[1]] if response['bio'] else '' #", "def getField(response, name): field = response[name] if field: span = name + '_span'", "= response['bio'][:user.bio_span[1]] if response['bio'] else '' location = response['location'][:user.location_span[1]] if response['location'] else ''", "name + '_span' # depend on validators following this naming convention max =", "disable=missing-docstring, invalid-name import flask import auth import config from main import app import", "(response['access_token'], '') me = github.get('user') usr = retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token():", "in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] = (response['access_token'], '') me = github.get('user') usr = retrieve_user_from_github(me.data)", "flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github(): return auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id = 'github_%s' % str(response['id'])", "access_token_url='https://github.com/login/oauth/access_token', authorize_url='https://github.com/login/oauth/authorize', base_url='https://api.github.com/', # consumer_key=config.CONFIG_DB.auth_github_id, # consumer_secret=config.CONFIG_DB.auth_github_secret, request_token_params={'scope': 'user:email'}, ) github = auth.create_oauth_app(github_config,", "return flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github(): return auth.signin_oauth(github) def retrieve_user_from_github(response): auth_id = 'github_%s' %", "span = name + '_span' # depend on validators following this naming convention", "pylint: disable=missing-docstring, invalid-name import flask import auth import config from main import app", "= response['location'][:user.location_span[1]] if response['location'] else '' return usr or auth.create_or_get_user_db( auth_id, response.get('name', ''),", "utf-8 # pylint: disable=missing-docstring, invalid-name import flask import auth import config from main", "is None: flask.flash('You denied the request to sign in.') return flask.redirect(flask.url_for('index')) flask.session['oauth_token'] =", "retrieve_user_from_github(me.data) return auth.signin_via_social(usr) @github.tokengetter def get_github_oauth_token(): return flask.session.get('oauth_token') @app.route('/signin/github/') def signin_github(): return auth.signin_oauth(github)", "'_span' # depend on validators following this naming convention max = getattr(user, span)[1]", "auth.create_oauth_app(github_config, 'github') @app.route('/_s/callback/github/oauth-authorized/') def github_authorized(): response = github.authorized_response() if response is None: flask.flash('You", "def retrieve_user_from_github(response): auth_id = 'github_%s' % str(response['id']) usr = User.get_by('authIDs_', auth_id) bio =", "response['location'][:user.location_span[1]] if response['location'] else '' return usr or auth.create_or_get_user_db( auth_id, response.get('name', ''), response.get('login'),", "response.get('login'), response.get('email', ''), location=location, bio=bio, github=response.get('login') ) # Todo replace opaque and repeated", "else '' return usr or 
auth.create_or_get_user_db( auth_id, response.get('name', ''), response.get('login'), response.get('email', ''), location=location,", "response[name] if field: span = name + '_span' # depend on validators following", "return usr or auth.create_or_get_user_db( auth_id, response.get('name', ''), response.get('login'), response.get('email', ''), location=location, bio=bio, github=response.get('login')" ]
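Following the module's own Todo, a hedged sketch of how the getField helper defined above would replace the repeated truncation in retrieve_user_from_github. The field names and the '<name>_span' convention come from the code itself; only the rewritten call sites are new.

# Sketch: the two truncation expressions in retrieve_user_from_github
# collapse to the helper, relying on user.bio_span / user.location_span.
bio = getField(response, 'bio')
location = getField(response, 'location')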
[ "date) except FileNotFoundError: if doscrape: print(\"* WARNING: beginning web scrape--be polite, they ban", "except StopIteration: data = None print(\"failed to process \" + date.strftime(\"%Y-%m-%d\")) return data", "(data[\"act\"] - data[\"sked\"]).astype(\"timedelta64[h]\") < -4 # hours data.ix[dayflip, \"act\"] += timedelta(days=1) data[\"delayhours\"] =", "multi-day trips datadt[day == \"2\"] += timedelta(days=1) # NOT relativedelta(days=1) datadt[day == \"3\"]", "+ train + \" on \" + date.strftime(\"%Y-%m-%d\") ) if zipfn is not", "Path(train) / \"\".join([train, \"_\", date.strftime(\"%Y%m%d\"), \".txt\"]) def filehandler(fn, train, date): fn = Path(fn).expanduser()", "dint = dstr.astype(float) # int can't use nan # ZERO PAD HOURS for", "+ \" to \" + dates[-1].strftime(\"%Y/%m/%d\") ) if delay.shape[1] > 1: # late", "missedind = (timelefthours < 0).values missedhours = timelefthours[missedind] if missedind.sum() > 0: print(missedhours)", "ax.set_ylabel(\"p(late)\") show() else: print(\"* skipped plotting due to no data\") def tohdf5(fn, data,", "delay rolls past midnight! # train wouldn't be more than 4 hours early!", "/ 60 # .values.astype(float)/1e9/3600 data[\"diffdelay\"] = data[\"delayhours\"].diff() # we don't expect the delay", "try: ziptop = buildziptop(train, date) with ZipFile(ziptop, \"r\") as z: zippath = buildzippath(train,", "and len(dates) > int(days[trains[0]]) ): stations = [] for t in trains: stations.append(delays[t].index.values.tolist())", "str2datetime(data, day, datestr): dstr = data.str.extract(r\"(\\d+)\") ampm = data.str.extract(\"([AP])\") + \"M\" dint =", "with z.open(zippath, \"r\") as f: txt = f.read().decode(\"utf-8\") except KeyError: print(\"I dont find\",", "# late vs. date end of route ax = delay.ix[stop].plot( ax=figure().gca(), linestyle=\"\", marker=\"*\"", "return txt def gethtml(url): response = urlopen(url) html = response.read().decode(\"utf-8\") # session.request(\"GET\", url)", "compile(r\"\\d{2}/\\d{2}/\\d{4}\") # not for zip files! 
# trainpat = compile('(?<=\\* Train )\\d+') lastheadpat", "+ url) # html = '** could not read ' + url return", "+ laststop) ax.set_xlabel(\"Hours Late\") ax.set_ylabel(\"p(late)\") show() else: print(\"* skipped plotting due to no", "17), (19, 24), (31, 36)], index_col=0, header=None, skiprows=0, ) # %% append last", "for line in inpt: tmp = firstheadpat.findall(line) if len(tmp) > 0: datestr =", "# print(arrival.values) # print(depart.values) elif overlapstation.size == 0: print(\"no connecting station found\") else:", "zipfile import ZipFile from bs4 import BeautifulSoup from re import compile from io", "store as text file like website # ziptop = 'test' + buildziptop(train,date) with", "skiprows=0, ) # %% append last arrival (destination) arv = getarv(txt) # %%", "data[\"day\"], datestr) # %% have to skip ahead a day when delay rolls", "than 12 hours between stations if (data[\"diffdelay\"].abs() > 12).any(): print(\"** WARNING: excessive time", "from re import compile from io import StringIO from time import sleep import", "print('** error ' + str(response.status) + ' could not read ' + url)", "z.writestr(zippath, txt, compress_type=ZIP_DEFLATED) # %% def getday(datafn, date, train, zipfn, doscrape): try: txt", "\".zip\") def buildzippath(train, date): return Path(train) / \"\".join([train, \"_\", date.strftime(\"%Y%m%d\"), \".txt\"]) def filehandler(fn,", "single train with open(fn, \"r\") as f: html = f.read() txt = [gettxt(html)]", "data.str.extract(r\"(\\d+)\") ampm = data.str.extract(\"([AP])\") + \"M\" dint = dstr.astype(float) # int can't use", "# print('** error ' + str(response.status) + ' could not read ' +", "try: txt = filehandler(datafn, train, date) except FileNotFoundError: if doscrape: print(\"* WARNING: beginning", "as z: zippath = buildzippath(train, date) with z.open(zippath, \"r\") as f: txt =", "+ laststop) ax.set_ylabel(\"Hours Late\") ax.set_xlabel(\"date\") # histogram ax = delay.ix[stop].hist(ax=figure().gca(), normed=1, bins=12) ax.set_title(\"Histogram:", "train info or too few dates\") def plottrain(delay, train, dates, stop, doplot): if", "in1d([\"conn\", \"all\"], doplot).any(): ax = timelefthours.boxplot( return_type=\"axes\", rot=90, whis=[10, 90], ax=figure().gca() ) ax.set_title(str(trains)", "datereq) data[\"sked\"] = str2datetime(data[\"sked\"], data[\"day\"], datestr) data[\"act\"] = str2datetime(data[\"act\"], data[\"day\"], datestr) # %%", "import Path from urllib.request import urlopen from numpy import nan, in1d, atleast_1d, logical_and", "dayflip = (data[\"act\"] - data[\"sked\"]).astype(\"timedelta64[h]\") < -4 # hours data.ix[dayflip, \"act\"] += timedelta(days=1)", "to have the needed data file for Train # \" + train +", "train with open(fn, \"r\") as f: html = f.read() txt = [gettxt(html)] elif", "+ str(response.getheader('Location'))) # else: # print('** error ' + str(response.status) + ' could", "ax = delay.ix[stop].plot( ax=figure().gca(), linestyle=\"\", marker=\"*\" ) # plots last station ax.set_title(\"Hours late", "html = gethtml(url) txt = gettxt(html) else: exit( \"you dont seem to have", "moved to ' + str(response.getheader('Location'))) # else: # print('** error ' + str(response.status)", "date) # mass download, throttle to be polite sleep(2) html = gethtml(url) txt", "the delay to jump more than 12 hours between stations if (data[\"diffdelay\"].abs() >", "1: # late vs. 
date end of route ax = delay.ix[stop].plot( ax=figure().gca(), linestyle=\"\",", "if sd != \"NaN\": dstr[i] = \"{:04d}\".format(sd.astype(int)) dstr = datestr + \"T\" +", "= stop if doplot and delay.shape[1] > 0 and in1d([\"delay\", \"all\"], doplot).any(): if", "\" + laststop) ax.set_xlabel(\"Hours Late\") ax.set_ylabel(\"p(late)\") show() else: print(\"* skipped plotting due to", "def getarv(txt): llrgx = compile(\"(?<=\\n).+(?=\\r*\\n+$)\") # no \\r in lookbehind lastline = llrgx.findall(txt)[0]", "files! # trainpat = compile('(?<=\\* Train )\\d+') lastheadpat = compile(r\"^\\* V\") datestr =", "left to connect\") show() # print(goodtimes) # print(depart[goodtimes].index) # print((depart.values-arrival.values)) # print((depart.values-arrival.values).astype(float)) #", "= session.getresponse() # if response.status == 200: # html = response.read().decode('utf-8') # elif", "to no data\") def tohdf5(fn, data, date): from pandas import HDFStore h5 =", "= [\"day\", \"sked\", \"act\"] return data, datestr def getarv(txt): llrgx = compile(\"(?<=\\n).+(?=\\r*\\n+$)\") #", "connections detected for \" + str(trains)) if goodtimes.size < 6 and in1d([\"conn\", \"all\"],", "day when delay rolls past midnight! # train wouldn't be more than 4", "in1d(stations[0], stations[1]) overlapstation = atleast_1d(stations[0][overlapind]) if overlapstation.size == 1: overlapstation = overlapstation[0] otherind", "is None: # must be a zip file where no dates are give", "pd.read_fwf( inpt, colspecs=[(2, 5), (16, 17), (19, 24), (31, 36)], index_col=0, header=None, skiprows=0,", "in1d(stations[1], overlapstation) if otherind > overlapind: daydiff = int(days[trains[1]]) - 1 arrival =", "= firstheadpat.findall(line) if len(tmp) > 0: datestr = tmp[0] if len(lastheadpat.findall(line)) > 0:", "how=\"all\") # needed for trailing blank lines data = data.replace(\"*\", nan) # now", "overlapstation = overlapstation[0] otherind = in1d(stations[1], overlapstation) if otherind > overlapind: daydiff =", "plottrains(delays, actual, days, trains, dates, doplot): \"\"\" http://stackoverflow.com/questions/11697709/comparing-two-lists-in-python can connection be made? 
\"\"\"", "date) z.writestr(zippath, txt, compress_type=ZIP_DEFLATED) # %% def getday(datafn, date, train, zipfn, doscrape): try:", "txt def getdata(txt, datereq): # %% first the departures data, datestr = getdept(txt,", "file like website # ziptop = 'test' + buildziptop(train,date) with ZipFile(zipfn, \"a\") as", "ax=figure().gca() ) ax.set_title(str(trains) + \" made connection at \" + overlapstation) ax.set_ylabel(\"Hours left", "atleast_1d(stations[0][overlapind]) if overlapstation.size == 1: overlapstation = overlapstation[0] otherind = in1d(stations[1], overlapstation) if", "due to missing train info or too few dates\") def plottrain(delay, train, dates,", "= getarv(txt) # %% drop blank rows before appending arrival data = data.dropna(axis=0,", "or too few dates\") def plottrain(delay, train, dates, stop, doplot): if stop is", "= \"{:04d}\".format(sd.astype(int)) dstr = datestr + \"T\" + dstr + ampm # add", "date.strftime(\"%Y-%m-%d\") ) if zipfn is not None: print(\"writing \" + date.strftime(\"%Y-%m-%d\") + \"", "): stations = [] for t in trains: stations.append(delays[t].index.values.tolist()) overlapind = in1d(stations[0], stations[1])", "import BeautifulSoup from re import compile from io import StringIO from time import", "if response.status == 200: # html = response.read().decode('utf-8') # elif response.status == 301:", "' could not read ' + url) # html = '** could not", "# must be a zip file where no dates are give datestr =", "compress_type=ZIP_DEFLATED) # %% def getday(datafn, date, train, zipfn, doscrape): try: txt = filehandler(datafn,", "for t in trains: stations.append(delays[t].index.values.tolist()) overlapind = in1d(stations[0], stations[1]) overlapstation = atleast_1d(stations[0][overlapind]) if", "try: data = getdata(txt, date) except StopIteration: data = None print(\"failed to process", "possible parsing error!\") print(txt) print(data) data = None return data def getdept(txt, datereq):", "print('** 301 moved to ' + str(response.getheader('Location'))) # else: # print('** error '", "except KeyError: print(\"I dont find\", zippath) txt = None else: raise ValueError(\"I dont", "fontsize=8) else: ax = delay.T.boxplot(return_type=\"axes\", rot=90, whis=[10, 90], ax=figure().gca()) ax.set_xlabel(\"Station\") ax.set_ylabel(\"hours delay\") ax.set_title(", "url += \"&selday=\" + date.strftime(\"%d\") return url def buildziptop(train, date): return Path(date.strftime(\"%Y\")) /", "inpt, colspecs=[(2, 5), (7, 8), (10, 15), (25, 30)], index_col=0, header=None, skiprows=0, converters={1:", "could not read ' + url) # html = '** could not read", "filehandler(datafn, train, date) except FileNotFoundError: if doscrape: print(\"* WARNING: beginning web scrape--be polite,", "data # %% def gettxt(html): soup = BeautifulSoup(html) txt = soup.get_text() return txt", "ax.set_xlabel(\"Station\") ax.set_ylabel(\"hours delay\") ax.set_title( \"Train #\" + train + \" \" + dates[0].strftime(\"%Y/%m/%d\")", "to put time-zone aware to Eastern time.. 
# multi-day trips datadt[day == \"2\"]", "buildziptop(train,date) with ZipFile(zipfn, \"a\") as z: zippath = buildzippath(train, date) z.writestr(zippath, txt, compress_type=ZIP_DEFLATED)", "= data.dropna(axis=0, how=\"all\") # needed for trailing blank lines data = data.replace(\"*\", nan)", "dstr[i] = \"{:04d}\".format(sd.astype(int)) dstr = datestr + \"T\" + dstr + ampm #", "trains try: ziptop = buildziptop(train, date) with ZipFile(ziptop, \"r\") as z: zippath =", "\" + dates[0].strftime(\"%Y/%m/%d\") + \" to \" + dates[-1].strftime(\"%Y/%m/%d\") ) if delay.shape[1] >", "actual[trains[1]].ix[overlapstation, :-daydiff] depart = actual[trains[0]].ix[overlapstation, daydiff:] else: daydiff = int(days[trains[0]]) - 1 arrival", ") if zipfn is not None: print(\"writing \" + date.strftime(\"%Y-%m-%d\") + \" to", "return Path(train) / \"\".join([train, \"_\", date.strftime(\"%Y%m%d\"), \".txt\"]) def filehandler(fn, train, date): fn =", "= datereq.strftime(\"%m/%d/%Y\") break # data = read_fwf(inpt,colspecs=[(2,5),(10,15),(16,17),(19,24),(25,30),(31,36)],skiprows=0) # data.columns = ['city','skedarv','skeddep','actarv','actdep'] data =", "datestr = datereq.strftime(\"%m/%d/%Y\") break # data = read_fwf(inpt,colspecs=[(2,5),(10,15),(16,17),(19,24),(25,30),(31,36)],skiprows=0) # data.columns = ['city','skedarv','skeddep','actarv','actdep'] data", "# data.columns = ['city','skedarv','skeddep','actarv','actdep'] data = pd.read_fwf( inpt, colspecs=[(2, 5), (16, 17), (19,", "rows before appending arrival data = data.dropna(axis=0, how=\"all\") # needed for trailing blank", "ziptop = buildziptop(train, date) with ZipFile(ziptop, \"r\") as z: zippath = buildzippath(train, date)", "show \"\"\" This function should be used politely and sparingly \"\"\" # from", "rolls past midnight! # train wouldn't be more than 4 hours early! dayflip", "datestr = tmp[0] if len(lastheadpat.findall(line)) > 0: if datestr is None: # must", "= [gettxt(html)] elif fn.suffix == \"txt\": # single train with open(fn, \"r\") as", "blank lines are gone, we swap for nan data.ix[-1] = arv.ix[0] # we", "data.ix[-1] = arv.ix[0] # we know arrival is one line, the last line", "1 arrival = actual[trains[0]].ix[overlapstation, :-daydiff] depart = actual[trains[1]].ix[overlapstation, daydiff:] # set_trace() goodtimes =", "front # finally put to datetime datadt = pd.to_datetime( dstr, format=\"%m/%d/%YT%I%M%p\", utc=True )", "= compile(r\"\\d{2}/\\d{2}/\\d{4}\") # not for zip files! # trainpat = compile('(?<=\\* Train )\\d+')", "blank rows before appending arrival data = data.dropna(axis=0, how=\"all\") # needed for trailing", "put time-zone aware to Eastern time.. 
# multi-day trips datadt[day == \"2\"] +=", "ax.set_ylabel(\"Hours left to connect\") show() # print(goodtimes) # print(depart[goodtimes].index) # print((depart.values-arrival.values)) # print((depart.values-arrival.values).astype(float))", "str2datetime(data[\"sked\"], data[\"day\"], datestr) data[\"act\"] = str2datetime(data[\"act\"], data[\"day\"], datestr) # %% have to skip", "\"html\": # single train with open(fn, \"r\") as f: html = f.read() txt", "not read ' + url) # html = '** could not read '", "== \"2\"] += timedelta(days=1) # NOT relativedelta(days=1) datadt[day == \"3\"] += timedelta(days=2) return", "+ date.strftime(\"%Y-%m-%d\") + \" to \" + zipfn) tozip(zipfn, txt, date, train) try:", "from matplotlib.pyplot import figure, show \"\"\" This function should be used politely and", "def buildziptop(train, date): return Path(date.strftime(\"%Y\")) / (str(train) + \".zip\") def buildzippath(train, date): return", "lookbehind lastline = llrgx.findall(txt)[0] with StringIO(lastline) as inpt: arv = pd.read_fwf( inpt, colspecs=[(2,", "print(depart.values) elif overlapstation.size == 0: print(\"no connecting station found\") else: print(\"more than 1", "60 # .values.astype(float)/1e9/3600 data[\"diffdelay\"] = data[\"delayhours\"].diff() # we don't expect the delay to", "yet\") else: print(\"skipped connection analysis due to missing train info or too few", "f: html = f.read() txt = [gettxt(html)] elif fn.suffix == \"txt\": # single", "(destination) arv = getarv(txt) # %% drop blank rows before appending arrival data", "as pd from matplotlib.pyplot import figure, show \"\"\" This function should be used", "return txt def getdata(txt, datereq): # %% first the departures data, datestr =", "== \"txt\": # single train with open(fn, \"r\") as f: txt = [f.read()]", "< 6 and in1d([\"conn\", \"all\"], doplot).any(): ax = timelefthours.plot(ax=figure().gca(), marker=\".\", legend=False) ax.set_xlabel(\"date\") elif", "plots last station ax.set_title(\"Hours late to \" + laststop) ax.set_ylabel(\"Hours Late\") ax.set_xlabel(\"date\") #", "print(\"writing \" + date.strftime(\"%Y-%m-%d\") + \" to \" + zipfn) tozip(zipfn, txt, date,", "timelefthours.plot(ax=figure().gca(), marker=\".\", legend=False) ax.set_xlabel(\"date\") elif in1d([\"conn\", \"all\"], doplot).any(): ax = timelefthours.boxplot( return_type=\"axes\", rot=90,", "0 and len(actual[trains[1]]) > 0 and len(dates) > int(days[trains[0]]) ): stations = []", "file where no dates are give datestr = datereq.strftime(\"%m/%d/%Y\") break # data =", "goodtimes.size < 6 and in1d([\"conn\", \"all\"], doplot).any(): ax = timelefthours.plot(ax=figure().gca(), marker=\".\", legend=False) ax.set_xlabel(\"date\")", "1e9 / 3600 ) timelefthours = pd.DataFrame( timelefthours, index=depart[goodtimes].index, columns=[\"hoursleft\"] ) missedind =", "+ train + \" \" + dates[0].strftime(\"%Y/%m/%d\") + \" to \" + dates[-1].strftime(\"%Y/%m/%d\")", "elif fn.suffix == \"txt\": # single train with open(fn, \"r\") as f: txt", "arrival is one line, the last line of the file data.columns = [\"day\",", "%% append last arrival (destination) arv = getarv(txt) # %% drop blank rows", "print(depart[goodtimes].index) # print((depart.values-arrival.values)) # print((depart.values-arrival.values).astype(float)) # print(arrival.values) # print(depart.values) elif overlapstation.size == 0:", "- data[\"sked\"]).astype(\"timedelta64[h]\") < -4 # hours data.ix[dayflip, \"act\"] += timedelta(days=1) data[\"delayhours\"] = (data[\"act\"]", "polite, they ban for overuse!\") url = 
buildurl(train, date) # mass download, throttle", "put to datetime datadt = pd.to_datetime( dstr, format=\"%m/%d/%YT%I%M%p\", utc=True ) # seems to", "find\", zippath) txt = None else: raise ValueError(\"I dont know how to parse\",", "website # ziptop = 'test' + buildziptop(train,date) with ZipFile(zipfn, \"a\") as z: zippath", "%% drop blank rows before appending arrival data = data.dropna(axis=0, how=\"all\") # needed", "import sleep import pandas as pd from matplotlib.pyplot import figure, show \"\"\" This", "process \" + date.strftime(\"%Y-%m-%d\")) return data # %% def gettxt(html): soup = BeautifulSoup(html)", "throttle to be polite sleep(2) html = gethtml(url) txt = gettxt(html) else: exit(", "politely and sparingly \"\"\" # from http.client import HTTPConnection def plottrains(delays, actual, days,", "date.strftime(\"%Y-%m-%d\") + \" to \" + zipfn) tozip(zipfn, txt, date, train) try: data", "\"r\") as f: html = f.read() txt = [gettxt(html)] elif fn.suffix == \"txt\":", "response.status == 301: # print('** 301 moved to ' + str(response.getheader('Location'))) # else:", "\"Train #\" + train + \" \" + dates[0].strftime(\"%Y/%m/%d\") + \" to \"", "getdept(txt, datereq) data[\"sked\"] = str2datetime(data[\"sked\"], data[\"day\"], datestr) data[\"act\"] = str2datetime(data[\"act\"], data[\"day\"], datestr) #", "time.. # multi-day trips datadt[day == \"2\"] += timedelta(days=1) # NOT relativedelta(days=1) datadt[day", "data = data.replace(\"*\", nan) # now that blank lines are gone, we swap", "lines data = data.replace(\"*\", nan) # now that blank lines are gone, we", "ZipFile from bs4 import BeautifulSoup from re import compile from io import StringIO", "\" + zipfn) tozip(zipfn, txt, date, train) try: data = getdata(txt, date) except", "are gone, we swap for nan data.ix[-1] = arv.ix[0] # we know arrival", "= in1d(stations[1], overlapstation) if otherind > overlapind: daydiff = int(days[trains[1]]) - 1 arrival", "# single train with open(fn, \"r\") as f: html = f.read() txt =", "compile(\"(?<=\\n).+(?=\\r*\\n+$)\") # no \\r in lookbehind lastline = llrgx.findall(txt)[0] with StringIO(lastline) as inpt:", "data = getdata(txt, date) except StopIteration: data = None print(\"failed to process \"", "# from http.client import HTTPConnection def plottrains(delays, actual, days, trains, dates, doplot): \"\"\"", "overlapstation) if otherind > overlapind: daydiff = int(days[trains[1]]) - 1 arrival = actual[trains[1]].ix[overlapstation,", "# if response.status == 200: # html = response.read().decode('utf-8') # elif response.status ==", "departures data, datestr = getdept(txt, datereq) data[\"sked\"] = str2datetime(data[\"sked\"], data[\"day\"], datestr) data[\"act\"] =", "buildurl(trainnum, date): url = \"http://dixielandsoftware.net/cgi-bin/gettrain.pl?seltrain=\" url += str(trainnum) url += \"&selyear=\" + date.strftime(\"%Y\")", "return url def buildziptop(train, date): return Path(date.strftime(\"%Y\")) / (str(train) + \".zip\") def buildzippath(train,", "delay.shape[1] > 1: # late vs. date end of route ax = delay.ix[stop].plot(", "than 1 connection found, this case isnt handled yet\") else: print(\"skipped connection analysis", "/ 3600 ) timelefthours = pd.DataFrame( timelefthours, index=depart[goodtimes].index, columns=[\"hoursleft\"] ) missedind = (timelefthours", "None else: raise ValueError(\"I dont know how to parse\", fn) return txt def", "can connection be made? 
\"\"\" # set_trace() if ( len(trains) == 2 and", "connect\") show() # print(goodtimes) # print(depart[goodtimes].index) # print((depart.values-arrival.values)) # print((depart.values-arrival.values).astype(float)) # print(arrival.values) #", "# else: # print('** error ' + str(response.status) + ' could not read", "found, this case isnt handled yet\") else: print(\"skipped connection analysis due to missing", "(depart[goodtimes].values - arrival[goodtimes].values).astype(float) / 1e9 / 3600 ) timelefthours = pd.DataFrame( timelefthours, index=depart[goodtimes].index,", "> 0: if datestr is None: # must be a zip file where", "\"act\"] return data, datestr def getarv(txt): llrgx = compile(\"(?<=\\n).+(?=\\r*\\n+$)\") # no \\r in", "late to \" + laststop) ax.set_ylabel(\"Hours Late\") ax.set_xlabel(\"date\") # histogram ax = delay.ix[stop].hist(ax=figure().gca(),", "< 0).values missedhours = timelefthours[missedind] if missedind.sum() > 0: print(missedhours) else: print(\"no missed", "to datetime datadt = pd.to_datetime( dstr, format=\"%m/%d/%YT%I%M%p\", utc=True ) # seems to put", "elif response.status == 301: # print('** 301 moved to ' + str(response.getheader('Location'))) #", "in1d([\"conn\", \"all\"], doplot).any(): ax = timelefthours.plot(ax=figure().gca(), marker=\".\", legend=False) ax.set_xlabel(\"date\") elif in1d([\"conn\", \"all\"], doplot).any():", "marker=\"*\" ) # plots last station ax.set_title(\"Hours late to \" + laststop) ax.set_ylabel(\"Hours", "normed=1, bins=12) ax.set_title(\"Histogram: Hours late to \" + laststop) ax.set_xlabel(\"Hours Late\") ax.set_ylabel(\"p(late)\") show()", "have the needed data file for Train # \" + train + \"", "def plottrains(delays, actual, days, trains, dates, doplot): \"\"\" http://stackoverflow.com/questions/11697709/comparing-two-lists-in-python can connection be made?", "< 6: ax = delay.plot(ax=figure().gca()) ax.legend(loc=\"best\", fontsize=8) else: ax = delay.T.boxplot(return_type=\"axes\", rot=90, whis=[10,", "# no \\r in lookbehind lastline = llrgx.findall(txt)[0] with StringIO(lastline) as inpt: arv", "30)], index_col=0, header=None, skiprows=0, converters={1: str}, ) return arv def str2datetime(data, day, datestr):", "/ 1e9 / 3600 ) timelefthours = pd.DataFrame( timelefthours, index=depart[goodtimes].index, columns=[\"hoursleft\"] ) missedind", "= tmp[0] if len(lastheadpat.findall(line)) > 0: if datestr is None: # must be", "pandas import HDFStore h5 = HDFStore(fn) h5[date.strftime(\"d%Y%m%d\")] = data h5.close() def tozip(zipfn, txt,", "> 0: datestr = tmp[0] if len(lastheadpat.findall(line)) > 0: if datestr is None:", "= (timelefthours < 0).values missedhours = timelefthours[missedind] if missedind.sum() > 0: print(missedhours) else:", "date): return Path(date.strftime(\"%Y\")) / (str(train) + \".zip\") def buildzippath(train, date): return Path(train) /", "print(\"failed to process \" + date.strftime(\"%Y-%m-%d\")) return data # %% def gettxt(html): soup", "[] for t in trains: stations.append(delays[t].index.values.tolist()) overlapind = in1d(stations[0], stations[1]) overlapstation = atleast_1d(stations[0][overlapind])", ".values.astype(float)/1e9/3600 data[\"diffdelay\"] = data[\"delayhours\"].diff() # we don't expect the delay to jump more", "have to skip ahead a day when delay rolls past midnight! 
# train", "we don't expect the delay to jump more than 12 hours between stations", "int(days[trains[0]]) ): stations = [] for t in trains: stations.append(delays[t].index.values.tolist()) overlapind = in1d(stations[0],", "sd in enumerate(dint): if sd != \"NaN\": dstr[i] = \"{:04d}\".format(sd.astype(int)) dstr = datestr", "blank lines data = data.replace(\"*\", nan) # now that blank lines are gone,", "arv = pd.read_fwf( inpt, colspecs=[(2, 5), (7, 8), (10, 15), (25, 30)], index_col=0,", "in \"html\": # single train with open(fn, \"r\") as f: html = f.read()", "add date to front # finally put to datetime datadt = pd.to_datetime( dstr,", "filehandler(fn, train, date): fn = Path(fn).expanduser() if fn.suffix in \"html\": # single train", "connecting station found\") else: print(\"more than 1 connection found, this case isnt handled", "if ( len(trains) == 2 and len(actual[trains[0]]) > 0 and len(actual[trains[1]]) > 0", "if zipfn is not None: print(\"writing \" + date.strftime(\"%Y-%m-%d\") + \" to \"", "None return data def getdept(txt, datereq): firstheadpat = compile(r\"\\d{2}/\\d{2}/\\d{4}\") # not for zip", "def tozip(zipfn, txt, date, train): from zipfile import ZIP_DEFLATED # store as text", "they ban for overuse!\") url = buildurl(train, date) # mass download, throttle to", "stations[1]) overlapstation = atleast_1d(stations[0][overlapind]) if overlapstation.size == 1: overlapstation = overlapstation[0] otherind =", "missedind.sum() > 0: print(missedhours) else: print(\"no missed connections detected for \" + str(trains))", "if otherind > overlapind: daydiff = int(days[trains[1]]) - 1 arrival = actual[trains[1]].ix[overlapstation, :-daydiff]", "otherind = in1d(stations[1], overlapstation) if otherind > overlapind: daydiff = int(days[trains[1]]) - 1", "data = data.dropna(axis=0, how=\"all\") # needed for trailing blank lines data = data.replace(\"*\",", "= overlapstation[0] otherind = in1d(stations[1], overlapstation) if otherind > overlapind: daydiff = int(days[trains[1]])", "# html = response.read().decode('utf-8') # elif response.status == 301: # print('** 301 moved", "no \\r in lookbehind lastline = llrgx.findall(txt)[0] with StringIO(lastline) as inpt: arv =", "compile from io import StringIO from time import sleep import pandas as pd", "[gettxt(html)] elif fn.suffix == \"txt\": # single train with open(fn, \"r\") as f:", "for nan data.ix[-1] = arv.ix[0] # we know arrival is one line, the", "soup.get_text() return txt def getdata(txt, datereq): # %% first the departures data, datestr", "= compile(r\"^\\* V\") datestr = None with StringIO(txt) as inpt: for line in", "early! 
dayflip = (data[\"act\"] - data[\"sked\"]).astype(\"timedelta64[h]\") < -4 # hours data.ix[dayflip, \"act\"] +=", "too few dates\") def plottrain(delay, train, dates, stop, doplot): if stop is None:", "arv = getarv(txt) # %% drop blank rows before appending arrival data =", "+ ' could not read ' + url) # html = '** could", "station ax.set_title(\"Hours late to \" + laststop) ax.set_ylabel(\"Hours Late\") ax.set_xlabel(\"date\") # histogram ax", ") timelefthours = pd.DataFrame( timelefthours, index=depart[goodtimes].index, columns=[\"hoursleft\"] ) missedind = (timelefthours < 0).values", "day, datestr): dstr = data.str.extract(r\"(\\d+)\") ampm = data.str.extract(\"([AP])\") + \"M\" dint = dstr.astype(float)", "return data # %% def gettxt(html): soup = BeautifulSoup(html) txt = soup.get_text() return", "# set_trace() goodtimes = logical_and(depart.notnull(), arrival.notnull()).values timelefthours = ( (depart[goodtimes].values - arrival[goodtimes].values).astype(float) /", "don't expect the delay to jump more than 12 hours between stations if", "skiprows=0, converters={1: str}, ) return arv def str2datetime(data, day, datestr): dstr = data.str.extract(r\"(\\d+)\")", "date): from pandas import HDFStore h5 = HDFStore(fn) h5[date.strftime(\"d%Y%m%d\")] = data h5.close() def", "= HDFStore(fn) h5[date.strftime(\"d%Y%m%d\")] = data h5.close() def tozip(zipfn, txt, date, train): from zipfile", "datetime import timedelta from zipfile import ZipFile from bs4 import BeautifulSoup from re", "on \" + date.strftime(\"%Y-%m-%d\") ) if zipfn is not None: print(\"writing \" +", "data[\"sked\"]).astype( \"timedelta64[m]\" ) / 60 # .values.astype(float)/1e9/3600 data[\"diffdelay\"] = data[\"delayhours\"].diff() # we don't", "open(fn, \"r\") as f: txt = [f.read()] elif fn.suffix == \"\": # single", "%% first the departures data, datestr = getdept(txt, datereq) data[\"sked\"] = str2datetime(data[\"sked\"], data[\"day\"],", "and len(actual[trains[0]]) > 0 and len(actual[trains[1]]) > 0 and len(dates) > int(days[trains[0]]) ):", "BeautifulSoup(html) txt = soup.get_text() return txt def getdata(txt, datereq): # %% first the", "delay to jump more than 12 hours between stations if (data[\"diffdelay\"].abs() > 12).any():", "return data, datestr def getarv(txt): llrgx = compile(\"(?<=\\n).+(?=\\r*\\n+$)\") # no \\r in lookbehind", "= data.str.extract(\"([AP])\") + \"M\" dint = dstr.astype(float) # int can't use nan #", "datetime datadt = pd.to_datetime( dstr, format=\"%m/%d/%YT%I%M%p\", utc=True ) # seems to put time-zone", "# plots last station ax.set_title(\"Hours late to \" + laststop) ax.set_ylabel(\"Hours Late\") ax.set_xlabel(\"date\")", "to be polite sleep(2) html = gethtml(url) txt = gettxt(html) else: exit( \"you", "arrival (destination) arv = getarv(txt) # %% drop blank rows before appending arrival", "ahead a day when delay rolls past midnight! 
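
# %% Editor-added illustration, not part of the original module: a minimal,
# self-contained sketch of the connection-margin arithmetic that plottrains()
# performs. All station times below are made up.
def _example_connection_margin():
    import pandas as pd

    # hypothetical arrival of the inbound train and departure of the outbound
    # train at a shared station, on two different days
    arrival = pd.Series(pd.to_datetime(["2015-03-05 14:10", "2015-03-06 15:40"]))
    depart = pd.Series(pd.to_datetime(["2015-03-05 15:00", "2015-03-06 15:20"]))
    # same nanoseconds -> hours conversion as plottrains(); negative = missed
    hoursleft = (depart.values - arrival.values).astype(float) / 1e9 / 3600
    print(hoursleft)  # [ 0.833... -0.333...]: the second connection is missed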

def plottrain(delay, train, dates, stop, doplot):
    if stop is None:
        stop = -1
        laststop = delay.index[-1]
    else:
        laststop = stop
    if doplot and delay.shape[1] > 0 and in1d(["delay", "all"], doplot).any():
        if delay.shape[1] < 6:
            ax = delay.plot(ax=figure().gca())
            ax.legend(loc="best", fontsize=8)
        else:
            ax = delay.T.boxplot(
                return_type="axes", rot=90, whis=[10, 90], ax=figure().gca()
            )
            ax.set_xlabel("Station")
        ax.set_ylabel("hours delay")
        ax.set_title(
            "Train #" + train + " " + dates[0].strftime("%Y/%m/%d")
            + " to " + dates[-1].strftime("%Y/%m/%d")
        )
        if delay.shape[1] > 1:
            # lateness vs. date at end of route
            ax = delay.ix[stop].plot(
                ax=figure().gca(), linestyle="", marker="*"
            )  # plots last station
            ax.set_title("Hours late to " + laststop)
            ax.set_ylabel("Hours Late")
            ax.set_xlabel("date")
            # histogram
            ax = delay.ix[stop].hist(ax=figure().gca(), normed=1, bins=12)
            ax.set_title("Histogram: Hours late to " + laststop)
            ax.set_xlabel("Hours Late")
            ax.set_ylabel("p(late)")
        show()
    else:
        print("* skipped plotting due to no data")


def tohdf5(fn, data, date):
    from pandas import HDFStore

    h5 = HDFStore(fn)
    h5[date.strftime("d%Y%m%d")] = data
    h5.close()


def tozip(zipfn, txt, date, train):
    from zipfile import ZIP_DEFLATED

    # store as a text file, like the website
    # ziptop = 'test' + buildziptop(train,date)
    with ZipFile(zipfn, "a") as z:
        zippath = buildzippath(train, date)
        z.writestr(str(zippath), txt, compress_type=ZIP_DEFLATED)  # writestr needs str, not Path
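
# %% Editor-added illustration, not part of the original module: storing one
# day's parsed table with tohdf5(). The filename and DataFrame are made up;
# the key follows the "d%Y%m%d" convention used above (requires PyTables).
def _example_tohdf5():
    from datetime import date
    import pandas as pd

    df = pd.DataFrame({"delayhours": [0.2, 1.5]}, index=["WAS", "NYP"])
    tohdf5("example_delays.h5", df, date(2015, 3, 5))  # stored under key "d20150305"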
"buildurl(train, date) # mass download, throttle to be polite sleep(2) html = gethtml(url)", "append last arrival (destination) arv = getarv(txt) # %% drop blank rows before", "getarv(txt): llrgx = compile(\"(?<=\\n).+(?=\\r*\\n+$)\") # no \\r in lookbehind lastline = llrgx.findall(txt)[0] with", "doplot): if stop is None: stop = -1 laststop = delay.index[-1] else: laststop", "= delay.T.boxplot(return_type=\"axes\", rot=90, whis=[10, 90], ax=figure().gca()) ax.set_xlabel(\"Station\") ax.set_ylabel(\"hours delay\") ax.set_title( \"Train #\" +", "\" to \" + zipfn) tozip(zipfn, txt, date, train) try: data = getdata(txt,", "print((depart.values-arrival.values)) # print((depart.values-arrival.values).astype(float)) # print(arrival.values) # print(depart.values) elif overlapstation.size == 0: print(\"no connecting", "inpt: for line in inpt: tmp = firstheadpat.findall(line) if len(tmp) > 0: datestr", "= atleast_1d(stations[0][overlapind]) if overlapstation.size == 1: overlapstation = overlapstation[0] otherind = in1d(stations[1], overlapstation)", "HOURS for i, sd in enumerate(dint): if sd != \"NaN\": dstr[i] = \"{:04d}\".format(sd.astype(int))", "txt, compress_type=ZIP_DEFLATED) # %% def getday(datafn, date, train, zipfn, doscrape): try: txt =", "timelefthours = ( (depart[goodtimes].values - arrival[goodtimes].values).astype(float) / 1e9 / 3600 ) timelefthours =", "laststop = stop if doplot and delay.shape[1] > 0 and in1d([\"delay\", \"all\"], doplot).any():", "\"T\" + dstr + ampm # add date to front # finally put", "= str2datetime(data[\"act\"], data[\"day\"], datestr) # %% have to skip ahead a day when", "None with StringIO(txt) as inpt: for line in inpt: tmp = firstheadpat.findall(line) if", "data.dropna(axis=0, how=\"all\") # needed for trailing blank lines data = data.replace(\"*\", nan) #", "301: # print('** 301 moved to ' + str(response.getheader('Location'))) # else: # print('**", "dates\") def plottrain(delay, train, dates, stop, doplot): if stop is None: stop =", "gethtml(url): response = urlopen(url) html = response.read().decode(\"utf-8\") # session.request(\"GET\", url) # response =", "= buildurl(train, date) # mass download, throttle to be polite sleep(2) html =", "\"\".join([train, \"_\", date.strftime(\"%Y%m%d\"), \".txt\"]) def filehandler(fn, train, date): fn = Path(fn).expanduser() if fn.suffix", "due to no data\") def tohdf5(fn, data, date): from pandas import HDFStore h5", "timedelta from zipfile import ZipFile from bs4 import BeautifulSoup from re import compile", "days, trains, dates, doplot): \"\"\" http://stackoverflow.com/questions/11697709/comparing-two-lists-in-python can connection be made? \"\"\" # set_trace()", "4 hours early! 
dayflip = (data[\"act\"] - data[\"sked\"]).astype(\"timedelta64[h]\") < -4 # hours data.ix[dayflip,", "3600 ) timelefthours = pd.DataFrame( timelefthours, index=depart[goodtimes].index, columns=[\"hoursleft\"] ) missedind = (timelefthours <", "as f: html = f.read() txt = [gettxt(html)] elif fn.suffix == \"txt\": #", "str(response.status) + ' could not read ' + url) # html = '**", "inpt: arv = pd.read_fwf( inpt, colspecs=[(2, 5), (7, 8), (10, 15), (25, 30)],", "difference detected, possible parsing error!\") print(txt) print(data) data = None return data def", "daydiff = int(days[trains[0]]) - 1 arrival = actual[trains[0]].ix[overlapstation, :-daydiff] depart = actual[trains[1]].ix[overlapstation, daydiff:]", "(timelefthours < 0).values missedhours = timelefthours[missedind] if missedind.sum() > 0: print(missedhours) else: print(\"no", "+ dstr + ampm # add date to front # finally put to", "\" + train + \" on \" + date.strftime(\"%Y-%m-%d\") ) if zipfn is", "2 and len(actual[trains[0]]) > 0 and len(actual[trains[1]]) > 0 and len(dates) > int(days[trains[0]])", ") return arv def str2datetime(data, day, datestr): dstr = data.str.extract(r\"(\\d+)\") ampm = data.str.extract(\"([AP])\")", ")\\d+') lastheadpat = compile(r\"^\\* V\") datestr = None with StringIO(txt) as inpt: for", "int can't use nan # ZERO PAD HOURS for i, sd in enumerate(dint):", "tmp[0] if len(lastheadpat.findall(line)) > 0: if datestr is None: # must be a", "stations if (data[\"diffdelay\"].abs() > 12).any(): print(\"** WARNING: excessive time difference detected, possible parsing", "datestr = getdept(txt, datereq) data[\"sked\"] = str2datetime(data[\"sked\"], data[\"day\"], datestr) data[\"act\"] = str2datetime(data[\"act\"], data[\"day\"],", "the needed data file for Train # \" + train + \" on", "5), (7, 8), (10, 15), (25, 30)], index_col=0, header=None, skiprows=0, converters={1: str}, )", "\" to \" + dates[-1].strftime(\"%Y/%m/%d\") ) if delay.shape[1] > 1: # late vs.", "len(dates) > int(days[trains[0]]) ): stations = [] for t in trains: stations.append(delays[t].index.values.tolist()) overlapind", "HDFStore(fn) h5[date.strftime(\"d%Y%m%d\")] = data h5.close() def tozip(zipfn, txt, date, train): from zipfile import", "\"a\") as z: zippath = buildzippath(train, date) z.writestr(zippath, txt, compress_type=ZIP_DEFLATED) # %% def", "if len(lastheadpat.findall(line)) > 0: if datestr is None: # must be a zip", "> 12).any(): print(\"** WARNING: excessive time difference detected, possible parsing error!\") print(txt) print(data)", "from time import sleep import pandas as pd from matplotlib.pyplot import figure, show", "dstr + ampm # add date to front # finally put to datetime", ") / 60 # .values.astype(float)/1e9/3600 data[\"diffdelay\"] = data[\"delayhours\"].diff() # we don't expect the", "multiple trains try: ziptop = buildziptop(train, date) with ZipFile(ziptop, \"r\") as z: zippath", "raise ValueError(\"I dont know how to parse\", fn) return txt def gethtml(url): response", "atleast_1d, logical_and from datetime import timedelta from zipfile import ZipFile from bs4 import", "zipfile import ZIP_DEFLATED # store as text file like website # ziptop =", "\"\"\" # set_trace() if ( len(trains) == 2 and len(actual[trains[0]]) > 0 and", "used politely and sparingly \"\"\" # from http.client import HTTPConnection def plottrains(delays, actual,", "to skip ahead a day when delay rolls past midnight! # train wouldn't", "be polite sleep(2) html = gethtml(url) txt = gettxt(html) else: exit( \"you dont", "past midnight! 
# train wouldn't be more than 4 hours early! dayflip =", "and sparingly \"\"\" # from http.client import HTTPConnection def plottrains(delays, actual, days, trains,", "\"&selday=\" + date.strftime(\"%d\") return url def buildziptop(train, date): return Path(date.strftime(\"%Y\")) / (str(train) +", "trains, dates, doplot): \"\"\" http://stackoverflow.com/questions/11697709/comparing-two-lists-in-python can connection be made? \"\"\" # set_trace() if", "StringIO(lastline) as inpt: arv = pd.read_fwf( inpt, colspecs=[(2, 5), (7, 8), (10, 15),", "index_col=0, header=None, skiprows=0, converters={1: str}, ) return arv def str2datetime(data, day, datestr): dstr", "f: txt = f.read().decode(\"utf-8\") except KeyError: print(\"I dont find\", zippath) txt = None", "date): return Path(train) / \"\".join([train, \"_\", date.strftime(\"%Y%m%d\"), \".txt\"]) def filehandler(fn, train, date): fn", "first the departures data, datestr = getdept(txt, datereq) data[\"sked\"] = str2datetime(data[\"sked\"], data[\"day\"], datestr)", "Late\") ax.set_ylabel(\"p(late)\") show() else: print(\"* skipped plotting due to no data\") def tohdf5(fn,", "use nan # ZERO PAD HOURS for i, sd in enumerate(dint): if sd", "if overlapstation.size == 1: overlapstation = overlapstation[0] otherind = in1d(stations[1], overlapstation) if otherind", "> 0 and in1d([\"delay\", \"all\"], doplot).any(): if delay.shape[1] < 6: ax = delay.plot(ax=figure().gca())", "http.client import HTTPConnection def plottrains(delays, actual, days, trains, dates, doplot): \"\"\" http://stackoverflow.com/questions/11697709/comparing-two-lists-in-python can", "import HTTPConnection def plottrains(delays, actual, days, trains, dates, doplot): \"\"\" http://stackoverflow.com/questions/11697709/comparing-two-lists-in-python can connection", "= ( (depart[goodtimes].values - arrival[goodtimes].values).astype(float) / 1e9 / 3600 ) timelefthours = pd.DataFrame(", "colspecs=[(2, 5), (16, 17), (19, 24), (31, 36)], index_col=0, header=None, skiprows=0, ) #", "= llrgx.findall(txt)[0] with StringIO(lastline) as inpt: arv = pd.read_fwf( inpt, colspecs=[(2, 5), (7,", "datadt[day == \"3\"] += timedelta(days=2) return datadt def buildurl(trainnum, date): url = \"http://dixielandsoftware.net/cgi-bin/gettrain.pl?seltrain=\"", "str(trainnum) url += \"&selyear=\" + date.strftime(\"%Y\") url += \"&selmonth=\" + date.strftime(\"%m\") url +=", "Train # \" + train + \" on \" + date.strftime(\"%Y-%m-%d\") ) if", "format=\"%m/%d/%YT%I%M%p\", utc=True ) # seems to put time-zone aware to Eastern time.. 
#", "else: # print('** error ' + str(response.status) + ' could not read '", "with open(fn, \"r\") as f: html = f.read() txt = [gettxt(html)] elif fn.suffix", "pd from matplotlib.pyplot import figure, show \"\"\" This function should be used politely", "gettxt(html) else: exit( \"you dont seem to have the needed data file for", "# NOT relativedelta(days=1) datadt[day == \"3\"] += timedelta(days=2) return datadt def buildurl(trainnum, date):", "zippath) txt = None else: raise ValueError(\"I dont know how to parse\", fn)", "+ date.strftime(\"%Y\") url += \"&selmonth=\" + date.strftime(\"%m\") url += \"&selday=\" + date.strftime(\"%d\") return", "lastline = llrgx.findall(txt)[0] with StringIO(lastline) as inpt: arv = pd.read_fwf( inpt, colspecs=[(2, 5),", "import ZipFile from bs4 import BeautifulSoup from re import compile from io import", "= None print(\"failed to process \" + date.strftime(\"%Y-%m-%d\")) return data # %% def", "= compile('(?<=\\* Train )\\d+') lastheadpat = compile(r\"^\\* V\") datestr = None with StringIO(txt)", "= datestr + \"T\" + dstr + ampm # add date to front", "# %% first the departures data, datestr = getdept(txt, datereq) data[\"sked\"] = str2datetime(data[\"sked\"],", "= pd.DataFrame( timelefthours, index=depart[goodtimes].index, columns=[\"hoursleft\"] ) missedind = (timelefthours < 0).values missedhours =", "> 0: print(missedhours) else: print(\"no missed connections detected for \" + str(trains)) if", "stop if doplot and delay.shape[1] > 0 and in1d([\"delay\", \"all\"], doplot).any(): if delay.shape[1]", "print(txt) print(data) data = None return data def getdept(txt, datereq): firstheadpat = compile(r\"\\d{2}/\\d{2}/\\d{4}\")", "return Path(date.strftime(\"%Y\")) / (str(train) + \".zip\") def buildzippath(train, date): return Path(train) / \"\".join([train,", "= None with StringIO(txt) as inpt: for line in inpt: tmp = firstheadpat.findall(line)", "urlopen(url) html = response.read().decode(\"utf-8\") # session.request(\"GET\", url) # response = session.getresponse() # if", "delay.plot(ax=figure().gca()) ax.legend(loc=\"best\", fontsize=8) else: ax = delay.T.boxplot(return_type=\"axes\", rot=90, whis=[10, 90], ax=figure().gca()) ax.set_xlabel(\"Station\") ax.set_ylabel(\"hours", "compile(r\"^\\* V\") datestr = None with StringIO(txt) as inpt: for line in inpt:", "delay\") ax.set_title( \"Train #\" + train + \" \" + dates[0].strftime(\"%Y/%m/%d\") + \"", "drop blank rows before appending arrival data = data.dropna(axis=0, how=\"all\") # needed for", "line of the file data.columns = [\"day\", \"sked\", \"act\"] return data, datestr def", "last station ax.set_title(\"Hours late to \" + laststop) ax.set_ylabel(\"Hours Late\") ax.set_xlabel(\"date\") # histogram", "datestr): dstr = data.str.extract(r\"(\\d+)\") ampm = data.str.extract(\"([AP])\") + \"M\" dint = dstr.astype(float) #", "info or too few dates\") def plottrain(delay, train, dates, stop, doplot): if stop", "\"{:04d}\".format(sd.astype(int)) dstr = datestr + \"T\" + dstr + ampm # add date", "\"r\") as f: txt = f.read().decode(\"utf-8\") except KeyError: print(\"I dont find\", zippath) txt", "data = None return data def getdept(txt, datereq): firstheadpat = compile(r\"\\d{2}/\\d{2}/\\d{4}\") # not", "elif fn.suffix == \"\": # single or multiple trains try: ziptop = buildziptop(train,", "print(\"I dont find\", zippath) txt = None else: raise ValueError(\"I dont know how", "txt = filehandler(datafn, train, date) except FileNotFoundError: if doscrape: print(\"* WARNING: beginning web", 
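
# %% Editor-added illustration, not part of the original module: the
# past-midnight rollover fix from getdata(). A train scheduled at 11:50 PM
# whose actual time parses as 12:10 AM of the same date would look ~23.7 hours
# early, so any actual time more than 4 hours before schedule gets a day added.
def _example_dayflip():
    import pandas as pd
    from datetime import timedelta

    sked = pd.to_datetime("2015-03-05 23:50")
    act = pd.to_datetime("2015-03-05 00:10")  # parsed without the rollover
    # equivalent to the module's (act - sked).astype('timedelta64[h]') < -4 test
    if (act - sked).total_seconds() / 3600 < -4:
        act += timedelta(days=1)
    print(act)  # 2015-03-06 00:10:00, i.e. 20 minutes late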
"logical_and(depart.notnull(), arrival.notnull()).values timelefthours = ( (depart[goodtimes].values - arrival[goodtimes].values).astype(float) / 1e9 / 3600 )", "= response.read().decode('utf-8') # elif response.status == 301: # print('** 301 moved to '", "laststop) ax.set_xlabel(\"Hours Late\") ax.set_ylabel(\"p(late)\") show() else: print(\"* skipped plotting due to no data\")", "< -4 # hours data.ix[dayflip, \"act\"] += timedelta(days=1) data[\"delayhours\"] = (data[\"act\"] - data[\"sked\"]).astype(", "missed connections detected for \" + str(trains)) if goodtimes.size < 6 and in1d([\"conn\",", "ax.set_title(str(trains) + \" made connection at \" + overlapstation) ax.set_ylabel(\"Hours left to connect\")", "overlapstation) ax.set_ylabel(\"Hours left to connect\") show() # print(goodtimes) # print(depart[goodtimes].index) # print((depart.values-arrival.values)) #", "that blank lines are gone, we swap for nan data.ix[-1] = arv.ix[0] #", "skip ahead a day when delay rolls past midnight! # train wouldn't be", "date.strftime(\"%Y\") url += \"&selmonth=\" + date.strftime(\"%m\") url += \"&selday=\" + date.strftime(\"%d\") return url", "# mass download, throttle to be polite sleep(2) html = gethtml(url) txt =", "whis=[10, 90], ax=figure().gca() ) ax.set_title(str(trains) + \" made connection at \" + overlapstation)", "handled yet\") else: print(\"skipped connection analysis due to missing train info or too", "\".txt\"]) def filehandler(fn, train, date): fn = Path(fn).expanduser() if fn.suffix in \"html\": #", "Path from urllib.request import urlopen from numpy import nan, in1d, atleast_1d, logical_and from", "txt = None else: raise ValueError(\"I dont know how to parse\", fn) return", "data def getdept(txt, datereq): firstheadpat = compile(r\"\\d{2}/\\d{2}/\\d{4}\") # not for zip files! 
#", "like website # ziptop = 'test' + buildziptop(train,date) with ZipFile(zipfn, \"a\") as z:", "KeyError: print(\"I dont find\", zippath) txt = None else: raise ValueError(\"I dont know", "for overuse!\") url = buildurl(train, date) # mass download, throttle to be polite", "sleep import pandas as pd from matplotlib.pyplot import figure, show \"\"\" This function", "ampm = data.str.extract(\"([AP])\") + \"M\" dint = dstr.astype(float) # int can't use nan", "datestr = None with StringIO(txt) as inpt: for line in inpt: tmp =", "= (data[\"act\"] - data[\"sked\"]).astype(\"timedelta64[h]\") < -4 # hours data.ix[dayflip, \"act\"] += timedelta(days=1) data[\"delayhours\"]", "= data h5.close() def tozip(zipfn, txt, date, train): from zipfile import ZIP_DEFLATED #", "def gettxt(html): soup = BeautifulSoup(html) txt = soup.get_text() return txt def getdata(txt, datereq):", "seem to have the needed data file for Train # \" + train", "= timelefthours.boxplot( return_type=\"axes\", rot=90, whis=[10, 90], ax=figure().gca() ) ax.set_title(str(trains) + \" made connection", "ax.set_xlabel(\"Hours Late\") ax.set_ylabel(\"p(late)\") show() else: print(\"* skipped plotting due to no data\") def", "txt = gettxt(html) else: exit( \"you dont seem to have the needed data", "data.replace(\"*\", nan) # now that blank lines are gone, we swap for nan", "doscrape: print(\"* WARNING: beginning web scrape--be polite, they ban for overuse!\") url =", "elif overlapstation.size == 0: print(\"no connecting station found\") else: print(\"more than 1 connection", "else: daydiff = int(days[trains[0]]) - 1 arrival = actual[trains[0]].ix[overlapstation, :-daydiff] depart = actual[trains[1]].ix[overlapstation,", "12 hours between stations if (data[\"diffdelay\"].abs() > 12).any(): print(\"** WARNING: excessive time difference", "WARNING: excessive time difference detected, possible parsing error!\") print(txt) print(data) data = None", "\"_\", date.strftime(\"%Y%m%d\"), \".txt\"]) def filehandler(fn, train, date): fn = Path(fn).expanduser() if fn.suffix in", "timelefthours, index=depart[goodtimes].index, columns=[\"hoursleft\"] ) missedind = (timelefthours < 0).values missedhours = timelefthours[missedind] if", "date, train): from zipfile import ZIP_DEFLATED # store as text file like website", "how to parse\", fn) return txt def gethtml(url): response = urlopen(url) html =", "fn) return txt def gethtml(url): response = urlopen(url) html = response.read().decode(\"utf-8\") # session.request(\"GET\",", "# seems to put time-zone aware to Eastern time.. # multi-day trips datadt[day", "# store as text file like website # ziptop = 'test' + buildziptop(train,date)", "stop is None: stop = -1 laststop = delay.index[-1] else: laststop = stop", "ZERO PAD HOURS for i, sd in enumerate(dint): if sd != \"NaN\": dstr[i]", "sd != \"NaN\": dstr[i] = \"{:04d}\".format(sd.astype(int)) dstr = datestr + \"T\" + dstr", ":-daydiff] depart = actual[trains[1]].ix[overlapstation, daydiff:] # set_trace() goodtimes = logical_and(depart.notnull(), arrival.notnull()).values timelefthours =", "dates, doplot): \"\"\" http://stackoverflow.com/questions/11697709/comparing-two-lists-in-python can connection be made? 
\"\"\" # set_trace() if (", "time difference detected, possible parsing error!\") print(txt) print(data) data = None return data", "%% def gettxt(html): soup = BeautifulSoup(html) txt = soup.get_text() return txt def getdata(txt,", "datadt[day == \"2\"] += timedelta(days=1) # NOT relativedelta(days=1) datadt[day == \"3\"] += timedelta(days=2)", "0: datestr = tmp[0] if len(lastheadpat.findall(line)) > 0: if datestr is None: #", "WARNING: beginning web scrape--be polite, they ban for overuse!\") url = buildurl(train, date)", "open(fn, \"r\") as f: html = f.read() txt = [gettxt(html)] elif fn.suffix ==", "hours data.ix[dayflip, \"act\"] += timedelta(days=1) data[\"delayhours\"] = (data[\"act\"] - data[\"sked\"]).astype( \"timedelta64[m]\" ) /", "> overlapind: daydiff = int(days[trains[1]]) - 1 arrival = actual[trains[1]].ix[overlapstation, :-daydiff] depart =", "sleep(2) html = gethtml(url) txt = gettxt(html) else: exit( \"you dont seem to", "fn.suffix in \"html\": # single train with open(fn, \"r\") as f: html =", "numpy import nan, in1d, atleast_1d, logical_and from datetime import timedelta from zipfile import", "str}, ) return arv def str2datetime(data, day, datestr): dstr = data.str.extract(r\"(\\d+)\") ampm =", "url) # response = session.getresponse() # if response.status == 200: # html =", "delay.shape[1] > 0 and in1d([\"delay\", \"all\"], doplot).any(): if delay.shape[1] < 6: ax =", "detected, possible parsing error!\") print(txt) print(data) data = None return data def getdept(txt,", "train, date): fn = Path(fn).expanduser() if fn.suffix in \"html\": # single train with", "firstheadpat = compile(r\"\\d{2}/\\d{2}/\\d{4}\") # not for zip files! # trainpat = compile('(?<=\\* Train", "with ZipFile(ziptop, \"r\") as z: zippath = buildzippath(train, date) with z.open(zippath, \"r\") as", "# needed for trailing blank lines data = data.replace(\"*\", nan) # now that", "actual[trains[0]].ix[overlapstation, daydiff:] else: daydiff = int(days[trains[0]]) - 1 arrival = actual[trains[0]].ix[overlapstation, :-daydiff] depart", "stations = [] for t in trains: stations.append(delays[t].index.values.tolist()) overlapind = in1d(stations[0], stations[1]) overlapstation", "analysis due to missing train info or too few dates\") def plottrain(delay, train,", "1 connection found, this case isnt handled yet\") else: print(\"skipped connection analysis due", "to jump more than 12 hours between stations if (data[\"diffdelay\"].abs() > 12).any(): print(\"**", "0: print(\"no connecting station found\") else: print(\"more than 1 connection found, this case", "train, dates, stop, doplot): if stop is None: stop = -1 laststop =", "is None: stop = -1 laststop = delay.index[-1] else: laststop = stop if", "import ZIP_DEFLATED # store as text file like website # ziptop = 'test'", "= None else: raise ValueError(\"I dont know how to parse\", fn) return txt", "ax.set_xlabel(\"date\") # histogram ax = delay.ix[stop].hist(ax=figure().gca(), normed=1, bins=12) ax.set_title(\"Histogram: Hours late to \"", "as inpt: for line in inpt: tmp = firstheadpat.findall(line) if len(tmp) > 0:", "urlopen from numpy import nan, in1d, atleast_1d, logical_and from datetime import timedelta from", "<reponame>scienceopen/amtrak-connections from pathlib import Path from urllib.request import urlopen from numpy import nan,", "= delay.ix[stop].plot( ax=figure().gca(), linestyle=\"\", marker=\"*\" ) # plots last station ax.set_title(\"Hours late to", "['city','skedarv','skeddep','actarv','actdep'] data = pd.read_fwf( inpt, colspecs=[(2, 5), (16, 
17), (19, 24), (31, 36)],", "arrival.notnull()).values timelefthours = ( (depart[goodtimes].values - arrival[goodtimes].values).astype(float) / 1e9 / 3600 ) timelefthours", "datestr + \"T\" + dstr + ampm # add date to front #", "+ date.strftime(\"%Y-%m-%d\")) return data # %% def gettxt(html): soup = BeautifulSoup(html) txt =", "ax.set_title(\"Hours late to \" + laststop) ax.set_ylabel(\"Hours Late\") ax.set_xlabel(\"date\") # histogram ax =", "with StringIO(txt) as inpt: for line in inpt: tmp = firstheadpat.findall(line) if len(tmp)", "url) # html = '** could not read ' + url return html", "as f: txt = f.read().decode(\"utf-8\") except KeyError: print(\"I dont find\", zippath) txt =", "\" \" + dates[0].strftime(\"%Y/%m/%d\") + \" to \" + dates[-1].strftime(\"%Y/%m/%d\") ) if delay.shape[1]", "def str2datetime(data, day, datestr): dstr = data.str.extract(r\"(\\d+)\") ampm = data.str.extract(\"([AP])\") + \"M\" dint", "html = f.read() txt = [gettxt(html)] elif fn.suffix == \"txt\": # single train", "urllib.request import urlopen from numpy import nan, in1d, atleast_1d, logical_and from datetime import", "to Eastern time.. # multi-day trips datadt[day == \"2\"] += timedelta(days=1) # NOT", "len(actual[trains[1]]) > 0 and len(dates) > int(days[trains[0]]) ): stations = [] for t", "# single or multiple trains try: ziptop = buildziptop(train, date) with ZipFile(ziptop, \"r\")", "data file for Train # \" + train + \" on \" +", "session.request(\"GET\", url) # response = session.getresponse() # if response.status == 200: # html", "index_col=0, header=None, skiprows=0, ) # %% append last arrival (destination) arv = getarv(txt)", "html = response.read().decode('utf-8') # elif response.status == 301: # print('** 301 moved to", "# single train with open(fn, \"r\") as f: txt = [f.read()] elif fn.suffix", "else: exit( \"you dont seem to have the needed data file for Train", "len(lastheadpat.findall(line)) > 0: if datestr is None: # must be a zip file", "daydiff:] # set_trace() goodtimes = logical_and(depart.notnull(), arrival.notnull()).values timelefthours = ( (depart[goodtimes].values - arrival[goodtimes].values).astype(float)", "\" + overlapstation) ax.set_ylabel(\"Hours left to connect\") show() # print(goodtimes) # print(depart[goodtimes].index) #", "- data[\"sked\"]).astype( \"timedelta64[m]\" ) / 60 # .values.astype(float)/1e9/3600 data[\"diffdelay\"] = data[\"delayhours\"].diff() # we", "return arv def str2datetime(data, day, datestr): dstr = data.str.extract(r\"(\\d+)\") ampm = data.str.extract(\"([AP])\") +", "dstr.astype(float) # int can't use nan # ZERO PAD HOURS for i, sd", "for \" + str(trains)) if goodtimes.size < 6 and in1d([\"conn\", \"all\"], doplot).any(): ax", "zip file where no dates are give datestr = datereq.strftime(\"%m/%d/%Y\") break # data", "end of route ax = delay.ix[stop].plot( ax=figure().gca(), linestyle=\"\", marker=\"*\" ) # plots last", "data[\"act\"] = str2datetime(data[\"act\"], data[\"day\"], datestr) # %% have to skip ahead a day", "ax = timelefthours.boxplot( return_type=\"axes\", rot=90, whis=[10, 90], ax=figure().gca() ) ax.set_title(str(trains) + \" made", "arrival = actual[trains[1]].ix[overlapstation, :-daydiff] depart = actual[trains[0]].ix[overlapstation, daydiff:] else: daydiff = int(days[trains[0]]) -", ") ax.set_title(str(trains) + \" made connection at \" + overlapstation) ax.set_ylabel(\"Hours left to", "print(arrival.values) # print(depart.values) elif overlapstation.size == 0: print(\"no connecting station found\") else: print(\"more", "= 
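
# %% Editor-added illustration, not part of the original module: the timestamp
# format that str2datetime() assembles -- zero-padded clock digits glued onto
# the header date, then parsed with %I%M%p.
def _example_timestamp_format():
    import pandas as pd

    # "934P" on the status page becomes "0934" + "PM", prefixed with the run date
    stamp = pd.to_datetime("03/05/2015T0934PM", format="%m/%d/%YT%I%M%p")
    print(stamp)  # 2015-03-05 21:34:00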
\"http://dixielandsoftware.net/cgi-bin/gettrain.pl?seltrain=\" url += str(trainnum) url += \"&selyear=\" + date.strftime(\"%Y\") url += \"&selmonth=\"", "doplot).any(): ax = timelefthours.plot(ax=figure().gca(), marker=\".\", legend=False) ax.set_xlabel(\"date\") elif in1d([\"conn\", \"all\"], doplot).any(): ax =", "data[\"delayhours\"].diff() # we don't expect the delay to jump more than 12 hours", "data.columns = [\"day\", \"sked\", \"act\"] return data, datestr def getarv(txt): llrgx = compile(\"(?<=\\n).+(?=\\r*\\n+$)\")", "to \" + zipfn) tozip(zipfn, txt, date, train) try: data = getdata(txt, date)", "actual[trains[1]].ix[overlapstation, daydiff:] # set_trace() goodtimes = logical_and(depart.notnull(), arrival.notnull()).values timelefthours = ( (depart[goodtimes].values -", "\"NaN\": dstr[i] = \"{:04d}\".format(sd.astype(int)) dstr = datestr + \"T\" + dstr + ampm", "else: print(\"* skipped plotting due to no data\") def tohdf5(fn, data, date): from", "txt def gethtml(url): response = urlopen(url) html = response.read().decode(\"utf-8\") # session.request(\"GET\", url) #", "+ \" \" + dates[0].strftime(\"%Y/%m/%d\") + \" to \" + dates[-1].strftime(\"%Y/%m/%d\") ) if", "zipfn) tozip(zipfn, txt, date, train) try: data = getdata(txt, date) except StopIteration: data", "datadt def buildurl(trainnum, date): url = \"http://dixielandsoftware.net/cgi-bin/gettrain.pl?seltrain=\" url += str(trainnum) url += \"&selyear=\"", "' + str(response.status) + ' could not read ' + url) # html", "stations.append(delays[t].index.values.tolist()) overlapind = in1d(stations[0], stations[1]) overlapstation = atleast_1d(stations[0][overlapind]) if overlapstation.size == 1: overlapstation", "hours between stations if (data[\"diffdelay\"].abs() > 12).any(): print(\"** WARNING: excessive time difference detected,", "import figure, show \"\"\" This function should be used politely and sparingly \"\"\"", ") if delay.shape[1] > 1: # late vs. date end of route ax", "daydiff = int(days[trains[1]]) - 1 arrival = actual[trains[1]].ix[overlapstation, :-daydiff] depart = actual[trains[0]].ix[overlapstation, daydiff:]", "= logical_and(depart.notnull(), arrival.notnull()).values timelefthours = ( (depart[goodtimes].values - arrival[goodtimes].values).astype(float) / 1e9 / 3600", "str2datetime(data[\"act\"], data[\"day\"], datestr) # %% have to skip ahead a day when delay", "train) try: data = getdata(txt, date) except StopIteration: data = None print(\"failed to", "24), (31, 36)], index_col=0, header=None, skiprows=0, ) # %% append last arrival (destination)", "\" on \" + date.strftime(\"%Y-%m-%d\") ) if zipfn is not None: print(\"writing \"", "found\") else: print(\"more than 1 connection found, this case isnt handled yet\") else:", "otherind > overlapind: daydiff = int(days[trains[1]]) - 1 arrival = actual[trains[1]].ix[overlapstation, :-daydiff] depart", "f.read() txt = [gettxt(html)] elif fn.suffix == \"txt\": # single train with open(fn,", "are give datestr = datereq.strftime(\"%m/%d/%Y\") break # data = read_fwf(inpt,colspecs=[(2,5),(10,15),(16,17),(19,24),(25,30),(31,36)],skiprows=0) # data.columns =", "midnight! # train wouldn't be more than 4 hours early! 
from pathlib import Path
from urllib.request import urlopen
from numpy import nan, in1d, atleast_1d, logical_and
from datetime import timedelta
from zipfile import ZipFile
from bs4 import BeautifulSoup
from re import compile
from io import StringIO
from time import sleep
import pandas as pd
from matplotlib.pyplot import figure, show

"""
This function should be used politely and sparingly
"""
# from http.client import HTTPConnection


def plottrains(delays, actual, days, trains, dates, doplot):
    """
    http://stackoverflow.com/questions/11697709/comparing-two-lists-in-python
    can connection be made?
    """
    # set_trace()
    if (
        len(trains) == 2
        and len(actual[trains[0]]) > 0
        and len(actual[trains[1]]) > 0
        and len(dates) > int(days[trains[0]])
    ):
        stations = []
        for t in trains:
            stations.append(delays[t].index.values.tolist())
        overlapind = in1d(stations[0], stations[1])
        overlapstation = atleast_1d(stations[0][overlapind])
        if overlapstation.size == 1:
            overlapstation = overlapstation[0]
            otherind = in1d(stations[1], overlapstation)
            if otherind > overlapind:
                daydiff = int(days[trains[1]]) - 1
                arrival = actual[trains[1]].ix[overlapstation, :-daydiff]
                depart = actual[trains[0]].ix[overlapstation, daydiff:]
            else:
                daydiff = int(days[trains[0]]) - 1
                arrival = actual[trains[0]].ix[overlapstation, :-daydiff]
                depart = actual[trains[1]].ix[overlapstation, daydiff:]
            # set_trace()
            goodtimes = logical_and(depart.notnull(), arrival.notnull()).values
            timelefthours = (
                (depart[goodtimes].values - arrival[goodtimes].values).astype(float)
                / 1e9
                / 3600
            )
            timelefthours = pd.DataFrame(
                timelefthours, index=depart[goodtimes].index, columns=["hoursleft"]
            )
            missedind = (timelefthours < 0).values
            missedhours = timelefthours[missedind]
            if missedind.sum() > 0:
                print(missedhours)
            else:
                print("no missed connections detected for " + str(trains))
            if goodtimes.size < 6 and in1d(["conn", "all"], doplot).any():
                ax = timelefthours.plot(ax=figure().gca(), marker=".", legend=False)
                ax.set_xlabel("date")
            elif in1d(["conn", "all"], doplot).any():
                ax = timelefthours.boxplot(
                    return_type="axes", rot=90, whis=[10, 90], ax=figure().gca()
                )
            ax.set_title(str(trains) + " made connection at " + overlapstation)
            ax.set_ylabel("Hours left to connect")
            show()
            # print(goodtimes)
            # print(depart[goodtimes].index)
            # print((depart.values-arrival.values))
            # print((depart.values-arrival.values).astype(float))
            # print(arrival.values)
            # print(depart.values)
        elif overlapstation.size == 0:
            print("no connecting station found")
        else:
            print("more than 1 connection found, this case isnt handled yet")
    else:
        print("skipped connection analysis due to missing train info or too few dates")


def plottrain(delay, train, dates, stop, doplot):
    if stop is None:
        stop = -1
        laststop = delay.index[-1]
    else:
        laststop = stop
    if doplot and delay.shape[1] > 0 and in1d(["delay", "all"], doplot).any():
        if delay.shape[1] < 6:
            ax = delay.plot(ax=figure().gca())
            ax.legend(loc="best", fontsize=8)
        else:
            ax = delay.T.boxplot(return_type="axes", rot=90, whis=[10, 90], ax=figure().gca())
            ax.set_xlabel("Station")
        ax.set_ylabel("hours delay")
        ax.set_title(
            "Train #" + train + " "
            + dates[0].strftime("%Y/%m/%d") + " to " + dates[-1].strftime("%Y/%m/%d")
        )
        if delay.shape[1] > 1:
            # late vs. date end of route
            ax = delay.ix[stop].plot(
                ax=figure().gca(), linestyle="", marker="*"
            )  # plots last station
            ax.set_title("Hours late to " + laststop)
            ax.set_ylabel("Hours Late")
            ax.set_xlabel("date")
            # histogram
            ax = delay.ix[stop].hist(ax=figure().gca(), normed=1, bins=12)
            ax.set_title("Histogram: Hours late to " + laststop)
            ax.set_xlabel("Hours Late")
            ax.set_ylabel("p(late)")
        show()
    else:
        print("* skipped plotting due to no data")


def tohdf5(fn, data, date):
    from pandas import HDFStore

    h5 = HDFStore(fn)
    h5[date.strftime("d%Y%m%d")] = data
    h5.close()


def tozip(zipfn, txt, date, train):
    from zipfile import ZIP_DEFLATED

    # store as text file like website
    # ziptop = 'test' + buildziptop(train,date)
    with ZipFile(zipfn, "a") as z:
        zippath = buildzippath(train, date)
        z.writestr(str(zippath), txt, compress_type=ZIP_DEFLATED)


# %%
def getday(datafn, date, train, zipfn, doscrape):
    try:
        txt = filehandler(datafn, train, date)
    except FileNotFoundError:
        if doscrape:
            print("* WARNING: beginning web scrape--be polite, they ban for overuse!")
            url = buildurl(train, date)
            # mass download, throttle to be polite
            sleep(2)
            html = gethtml(url)
            txt = gettxt(html)
        else:
            exit(
                "you dont seem to have the needed data file for Train # "
                + train + " on " + date.strftime("%Y-%m-%d")
            )

    if zipfn is not None:
        print("writing " + date.strftime("%Y-%m-%d") + " to " + zipfn)
        tozip(zipfn, txt, date, train)

    try:
        data = getdata(txt, date)
    except StopIteration:
        data = None
        print("failed to process " + date.strftime("%Y-%m-%d"))

    return data


# %%
def gettxt(html):
    soup = BeautifulSoup(html)
    txt = soup.get_text()
    return txt


def getdata(txt, datereq):
    # %% first the departures
    data, datestr = getdept(txt, datereq)
    data["sked"] = str2datetime(data["sked"], data["day"], datestr)
    data["act"] = str2datetime(data["act"], data["day"], datestr)
    # %% have to skip ahead a day when delay rolls past midnight!
    # train wouldn't be more than 4 hours early!
    dayflip = (data["act"] - data["sked"]).astype("timedelta64[h]") < -4  # hours
    data.ix[dayflip, "act"] += timedelta(days=1)
    data["delayhours"] = (data["act"] - data["sked"]).astype(
        "timedelta64[m]"
    ) / 60  # .values.astype(float)/1e9/3600
    data["diffdelay"] = data["delayhours"].diff()
    # we don't expect the delay to jump more than 12 hours between stations
    if (data["diffdelay"].abs() > 12).any():
        print("** WARNING: excessive time difference detected, possible parsing error!")
        print(txt)
        print(data)
        data = None
    return data


def getdept(txt, datereq):
    firstheadpat = compile(r"\d{2}/\d{2}/\d{4}")  # not for zip files!
    # trainpat = compile('(?<=\* Train )\d+')
    lastheadpat = compile(r"^\* V")
    datestr = None
    with StringIO(txt) as inpt:
        for line in inpt:
            tmp = firstheadpat.findall(line)
            if len(tmp) > 0:
                datestr = tmp[0]
            if len(lastheadpat.findall(line)) > 0:
                if datestr is None:
                    # must be a zip file where no dates are given
                    datestr = datereq.strftime("%m/%d/%Y")
                break
        # data = read_fwf(inpt,colspecs=[(2,5),(10,15),(16,17),(19,24),(25,30),(31,36)],skiprows=0)
        # data.columns = ['city','skedarv','skeddep','actarv','actdep']
        data = pd.read_fwf(
            inpt,
            colspecs=[(2, 5), (16, 17), (19, 24), (31, 36)],
            index_col=0,
            header=None,
            skiprows=0,
        )
    # %% append last arrival (destination)
    arv = getarv(txt)
    # %% drop blank rows before appending arrival
    data = data.dropna(axis=0, how="all")  # needed for trailing blank lines
    data = data.replace("*", nan)  # now that blank lines are gone, we swap for nan
    data.ix[-1] = arv.ix[0]  # we know arrival is one line, the last line of the file
    data.columns = ["day", "sked", "act"]
    return data, datestr


def getarv(txt):
    llrgx = compile("(?<=\n).+(?=\r*\n+$)")  # no \r in lookbehind
    lastline = llrgx.findall(txt)[0]
    with StringIO(lastline) as inpt:
        arv = pd.read_fwf(
            inpt,
            colspecs=[(2, 5), (7, 8), (10, 15), (25, 30)],
            index_col=0,
            header=None,
            skiprows=0,
            converters={1: str},
        )
    return arv


def str2datetime(data, day, datestr):
    dstr = data.str.extract(r"(\d+)")
    ampm = data.str.extract("([AP])") + "M"
    dint = dstr.astype(float)  # int can't use nan
    # ZERO PAD HOURS
    for i, sd in enumerate(dint):
        if sd != "NaN":
            dstr[i] = "{:04d}".format(sd.astype(int))
    dstr = datestr + "T" + dstr + ampm  # add date to front
    # finally put to datetime
    datadt = pd.to_datetime(
        dstr, format="%m/%d/%YT%I%M%p", utc=True
    )  # seems to put time-zone aware to Eastern time..
    # multi-day trips
    datadt[day == "2"] += timedelta(days=1)  # NOT relativedelta(days=1)
    datadt[day == "3"] += timedelta(days=2)
    return datadt


def buildurl(trainnum, date):
    url = "http://dixielandsoftware.net/cgi-bin/gettrain.pl?seltrain="
    url += str(trainnum)
    url += "&selyear=" + date.strftime("%Y")
    url += "&selmonth=" + date.strftime("%m")
    url += "&selday=" + date.strftime("%d")
    return url


def buildziptop(train, date):
    return Path(date.strftime("%Y")) / (str(train) + ".zip")


def buildzippath(train, date):
    return Path(train) / "".join([train, "_", date.strftime("%Y%m%d"), ".txt"])


def filehandler(fn, train, date):
    fn = Path(fn).expanduser()
    if fn.suffix == ".html":  # single train
        with open(fn, "r") as f:
            html = f.read()
        txt = [gettxt(html)]
    elif fn.suffix == ".txt":  # single train
        with open(fn, "r") as f:
            txt = [f.read()]
    elif fn.suffix == "":  # single or multiple trains
        try:
            ziptop = buildziptop(train, date)
            with ZipFile(ziptop, "r") as z:
                zippath = buildzippath(train, date)
                with z.open(str(zippath), "r") as f:
                    txt = f.read().decode("utf-8")
        except KeyError:
            print("I dont find", zippath)
            txt = None
    else:
        raise ValueError("I dont know how to parse", fn)
    return txt


def gethtml(url):
    response = urlopen(url)
    html = response.read().decode("utf-8")
    # session.request("GET", url)
    # response = session.getresponse()
    # if response.status == 200:
    #     html = response.read().decode('utf-8')
    # elif response.status == 301:
    #     print('** 301 moved to ' + str(response.getheader('Location')))
    # else:
    #     print('** error ' + str(response.status) + ' could not read ' + url)
    #     html = '** could not read ' +
    return html
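For orientation, here is a minimal driver sketch, not part of the original module, showing how getday and plottrain chain together. The data path "~/data", the train number "50", and the date span are hypothetical placeholders.

# Hypothetical driver for the module above: collect one week of delay data,
# then plot it. Path, train number, and dates are made-up placeholders.
import pandas as pd

train = "50"  # train number kept as a string, matching how the module uses it
dates = pd.date_range("2015-01-01", "2015-01-07")
frames = {}
for date in dates:
    # doscrape=False exits if the local zip archive is missing, rather than
    # hitting the website (which the module itself warns against overusing)
    data = getday("~/data", date, train, None, doscrape=False)
    if data is not None:
        frames[date] = data["delayhours"]

delay = pd.DataFrame(frames)  # index: stations, columns: dates
plottrain(delay, train, dates, stop=None, doplot=["delay"])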
[ "import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls, *args, **kwargs): if 'dynamic_content' in kwargs:", "in kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps', 'steps_es'] for step_keyword in step_keywords:", "@classmethod def create(cls, *args, **kwargs): if 'dynamic_content' in kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords", "ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls, *args, **kwargs): if 'dynamic_content' in kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content'])", "kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword]) return super(ServicePageFactory, cls).create(*args, **kwargs) class Meta: model = ServicePage", "= ['steps', 'steps_es'] for step_keyword in step_keywords: if step_keyword in kwargs: kwargs[step_keyword] =", "if step_keyword in kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword]) return super(ServicePageFactory, cls).create(*args, **kwargs) class Meta:", "step_keyword in step_keywords: if step_keyword in kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword]) return super(ServicePageFactory, cls).create(*args,", "pages.topic_page.factories import JanisBasePageWithTopicsFactory from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls, *args,", "import JanisBasePageWithTopicsFactory from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls, *args, **kwargs):", "streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls, *args, **kwargs): if 'dynamic_content' in kwargs: kwargs['dynamic_content']", "ServicePage from pages.topic_page.factories import JanisBasePageWithTopicsFactory from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def", "from pages.topic_page.factories import JanisBasePageWithTopicsFactory from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls,", "*args, **kwargs): if 'dynamic_content' in kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps', 'steps_es']", "streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps', 'steps_es'] for step_keyword in step_keywords: if step_keyword in kwargs:", "in kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword]) return super(ServicePageFactory, cls).create(*args, **kwargs) class Meta: model =", "step_keywords = ['steps', 'steps_es'] for step_keyword in step_keywords: if step_keyword in kwargs: kwargs[step_keyword]", "step_keyword in kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword]) return super(ServicePageFactory, cls).create(*args, **kwargs) class Meta: model", "step_keywords: if step_keyword in kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword]) return super(ServicePageFactory, cls).create(*args, **kwargs) class", "'steps_es'] for step_keyword in step_keywords: if step_keyword in kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword]) return", "= streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps', 
'steps_es'] for step_keyword in step_keywords: if step_keyword in", "JanisBasePageWithTopicsFactory from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls, *args, **kwargs): if", "create(cls, *args, **kwargs): if 'dynamic_content' in kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps',", "pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls, *args, **kwargs): if 'dynamic_content' in", "class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls, *args, **kwargs): if 'dynamic_content' in kwargs: kwargs['dynamic_content'] =", "in step_keywords: if step_keyword in kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword]) return super(ServicePageFactory, cls).create(*args, **kwargs)", "if 'dynamic_content' in kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps', 'steps_es'] for step_keyword", "from pages.service_page.models import ServicePage from pages.topic_page.factories import JanisBasePageWithTopicsFactory from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class", "for step_keyword in step_keywords: if step_keyword in kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword]) return super(ServicePageFactory,", "['steps', 'steps_es'] for step_keyword in step_keywords: if step_keyword in kwargs: kwargs[step_keyword] = streamfieldify(kwargs[step_keyword])", "pages.service_page.models import ServicePage from pages.topic_page.factories import JanisBasePageWithTopicsFactory from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory):", "from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod def create(cls, *args, **kwargs): if 'dynamic_content'", "kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps', 'steps_es'] for step_keyword in step_keywords: if step_keyword", "import ServicePage from pages.topic_page.factories import JanisBasePageWithTopicsFactory from pages.base_page.fixtures.helpers.streamfieldify import streamfieldify class ServicePageFactory(JanisBasePageWithTopicsFactory): @classmethod", "'dynamic_content' in kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps', 'steps_es'] for step_keyword in", "def create(cls, *args, **kwargs): if 'dynamic_content' in kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords =", "**kwargs): if 'dynamic_content' in kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps', 'steps_es'] for", "kwargs: kwargs['dynamic_content'] = streamfieldify(kwargs['dynamic_content']) step_keywords = ['steps', 'steps_es'] for step_keyword in step_keywords: if" ]
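A hedged usage sketch for the factory: only the dynamic_content and steps/steps_es keywords come from the code above. The slug field and the fixture payload shape are hypothetical, since the streamfieldify helper's expected input is not shown in this file.

# Hypothetical fixture call (requires the Django/Wagtail app context):
# any of dynamic_content / steps / steps_es passed here is converted by
# streamfieldify() before JanisBasePageWithTopicsFactory.create() runs.
page = ServicePageFactory.create(
    slug="example-service",          # hypothetical base-page field
    steps=[{"value": "Step one."}],  # assumed raw shape; streamfieldify's
                                     # actual expected input is not shown here
)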
[ "allowed_environments: list, ): self.__container_init_function = container_init_function self.__kernel_class = kernel_class self.__root_module_name = root_module_name self.__allowed_environments", "container_init_function self.__kernel_class = kernel_class self.__root_module_name = root_module_name self.__allowed_environments = allowed_environments @property def container_init_function(self):", "@property def container_init_function(self): return self.__container_init_function @property def kernel_class(self): return self.__kernel_class @property def root_module_name(self):", "self, container_init_function: callable, kernel_class: type, root_module_name: str, allowed_environments: list, ): self.__container_init_function = container_init_function", "def kernel_class(self): return self.__kernel_class @property def root_module_name(self): return self.__root_module_name @property def allowed_environments(self): return", "def __init__( self, container_init_function: callable, kernel_class: type, root_module_name: str, allowed_environments: list, ): self.__container_init_function", "__init__( self, container_init_function: callable, kernel_class: type, root_module_name: str, allowed_environments: list, ): self.__container_init_function =", "def container_init_function(self): return self.__container_init_function @property def kernel_class(self): return self.__kernel_class @property def root_module_name(self): return", "container_init_function: callable, kernel_class: type, root_module_name: str, allowed_environments: list, ): self.__container_init_function = container_init_function self.__kernel_class", "kernel_class: type, root_module_name: str, allowed_environments: list, ): self.__container_init_function = container_init_function self.__kernel_class = kernel_class", "= container_init_function self.__kernel_class = kernel_class self.__root_module_name = root_module_name self.__allowed_environments = allowed_environments @property def", "= root_module_name self.__allowed_environments = allowed_environments @property def container_init_function(self): return self.__container_init_function @property def kernel_class(self):", "self.__root_module_name = root_module_name self.__allowed_environments = allowed_environments @property def container_init_function(self): return self.__container_init_function @property def", "class Config: def __init__( self, container_init_function: callable, kernel_class: type, root_module_name: str, allowed_environments: list,", "Config: def __init__( self, container_init_function: callable, kernel_class: type, root_module_name: str, allowed_environments: list, ):", "self.__container_init_function = container_init_function self.__kernel_class = kernel_class self.__root_module_name = root_module_name self.__allowed_environments = allowed_environments @property", "str, allowed_environments: list, ): self.__container_init_function = container_init_function self.__kernel_class = kernel_class self.__root_module_name = root_module_name", "): self.__container_init_function = container_init_function self.__kernel_class = kernel_class self.__root_module_name = root_module_name self.__allowed_environments = allowed_environments", "list, ): self.__container_init_function = container_init_function self.__kernel_class = kernel_class self.__root_module_name = root_module_name self.__allowed_environments =", "kernel_class self.__root_module_name = root_module_name self.__allowed_environments = allowed_environments @property def container_init_function(self): return 
self.__container_init_function @property", "return self.__container_init_function @property def kernel_class(self): return self.__kernel_class @property def root_module_name(self): return self.__root_module_name @property", "= kernel_class self.__root_module_name = root_module_name self.__allowed_environments = allowed_environments @property def container_init_function(self): return self.__container_init_function", "<filename>src/pyfonycore/bootstrap/config/Config.py class Config: def __init__( self, container_init_function: callable, kernel_class: type, root_module_name: str, allowed_environments:", "container_init_function(self): return self.__container_init_function @property def kernel_class(self): return self.__kernel_class @property def root_module_name(self): return self.__root_module_name", "self.__container_init_function @property def kernel_class(self): return self.__kernel_class @property def root_module_name(self): return self.__root_module_name @property def", "type, root_module_name: str, allowed_environments: list, ): self.__container_init_function = container_init_function self.__kernel_class = kernel_class self.__root_module_name", "callable, kernel_class: type, root_module_name: str, allowed_environments: list, ): self.__container_init_function = container_init_function self.__kernel_class =", "root_module_name self.__allowed_environments = allowed_environments @property def container_init_function(self): return self.__container_init_function @property def kernel_class(self): return", "self.__kernel_class = kernel_class self.__root_module_name = root_module_name self.__allowed_environments = allowed_environments @property def container_init_function(self): return", "kernel_class(self): return self.__kernel_class @property def root_module_name(self): return self.__root_module_name @property def allowed_environments(self): return self.__allowed_environments", "= allowed_environments @property def container_init_function(self): return self.__container_init_function @property def kernel_class(self): return self.__kernel_class @property", "root_module_name: str, allowed_environments: list, ): self.__container_init_function = container_init_function self.__kernel_class = kernel_class self.__root_module_name =", "@property def kernel_class(self): return self.__kernel_class @property def root_module_name(self): return self.__root_module_name @property def allowed_environments(self):", "self.__allowed_environments = allowed_environments @property def container_init_function(self): return self.__container_init_function @property def kernel_class(self): return self.__kernel_class", "allowed_environments @property def container_init_function(self): return self.__container_init_function @property def kernel_class(self): return self.__kernel_class @property def" ]
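A minimal usage sketch for Config; the kernel class, init function, and all argument values below are placeholders, not names from pyfonycore itself.

# Placeholder collaborators -- only the Config API above is real here.
class DummyKernel:
    pass

def init_container(env: str):
    return None  # a real project would build and return its DI container here

config = Config(
    container_init_function=init_container,
    kernel_class=DummyKernel,
    root_module_name="myapp",
    allowed_environments=["dev", "test", "prod"],
)

# The properties are read-only views of the name-mangled private attributes
assert config.kernel_class is DummyKernel
assert config.root_module_name == "myapp"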
[ "import List class Solution: def majorityElement(self, nums: List[int]) -> int: count, candidate =", "from typing import List class Solution: def majorityElement(self, nums: List[int]) -> int: count,", "def majorityElement(self, nums: List[int]) -> int: count, candidate = 0, 0 for num", "candidate = 0, 0 for num in nums: if count == 0: candidate", "\"Bannings\" from typing import List class Solution: def majorityElement(self, nums: List[int]) -> int:", "nums: List[int]) -> int: count, candidate = 0, 0 for num in nums:", "candidate = num count += (1 if num == candidate else -1) return", "== candidate else -1) return candidate if __name__ == '__main__': assert Solution().majorityElement([3,2,3]) ==", "return candidate if __name__ == '__main__': assert Solution().majorityElement([3,2,3]) == 3 assert Solution().majorityElement([2,2,1,1,1,2,2]) ==", "#!/usr/bin/python3 # -*-coding:utf-8-*- __author__ = \"Bannings\" from typing import List class Solution: def", "Solution: def majorityElement(self, nums: List[int]) -> int: count, candidate = 0, 0 for", "__author__ = \"Bannings\" from typing import List class Solution: def majorityElement(self, nums: List[int])", "(1 if num == candidate else -1) return candidate if __name__ == '__main__':", "+= (1 if num == candidate else -1) return candidate if __name__ ==", "else -1) return candidate if __name__ == '__main__': assert Solution().majorityElement([3,2,3]) == 3 assert", "= 0, 0 for num in nums: if count == 0: candidate =", "count += (1 if num == candidate else -1) return candidate if __name__", "List class Solution: def majorityElement(self, nums: List[int]) -> int: count, candidate = 0,", "0: candidate = num count += (1 if num == candidate else -1)", "List[int]) -> int: count, candidate = 0, 0 for num in nums: if", "count, candidate = 0, 0 for num in nums: if count == 0:", "<reponame>zhangao0086/Python-Algorithm #!/usr/bin/python3 # -*-coding:utf-8-*- __author__ = \"Bannings\" from typing import List class Solution:", "count == 0: candidate = num count += (1 if num == candidate", "nums: if count == 0: candidate = num count += (1 if num", "num == candidate else -1) return candidate if __name__ == '__main__': assert Solution().majorityElement([3,2,3])", "# -*-coding:utf-8-*- __author__ = \"Bannings\" from typing import List class Solution: def majorityElement(self,", "= num count += (1 if num == candidate else -1) return candidate", "candidate else -1) return candidate if __name__ == '__main__': assert Solution().majorityElement([3,2,3]) == 3", "for num in nums: if count == 0: candidate = num count +=", "if num == candidate else -1) return candidate if __name__ == '__main__': assert", "-*-coding:utf-8-*- __author__ = \"Bannings\" from typing import List class Solution: def majorityElement(self, nums:", "class Solution: def majorityElement(self, nums: List[int]) -> int: count, candidate = 0, 0", "num in nums: if count == 0: candidate = num count += (1", "-1) return candidate if __name__ == '__main__': assert Solution().majorityElement([3,2,3]) == 3 assert Solution().majorityElement([2,2,1,1,1,2,2])", "0, 0 for num in nums: if count == 0: candidate = num", "in nums: if count == 0: candidate = num count += (1 if", "majorityElement(self, nums: List[int]) -> int: count, candidate = 0, 0 for num in", "== 0: candidate = num count += (1 if num == candidate else", "if count == 0: candidate = num count += (1 if num ==", "num count += (1 if num == candidate else -1) return candidate if", "typing import List class Solution: def majorityElement(self, nums: 
List[int]) -> int: count, candidate", "0 for num in nums: if count == 0: candidate = num count", "-> int: count, candidate = 0, 0 for num in nums: if count", "int: count, candidate = 0, 0 for num in nums: if count ==", "candidate if __name__ == '__main__': assert Solution().majorityElement([3,2,3]) == 3 assert Solution().majorityElement([2,2,1,1,1,2,2]) == 2", "= \"Bannings\" from typing import List class Solution: def majorityElement(self, nums: List[int]) ->" ]
[ "Unless required by applicable law or agreed to in writing, software # distributed", "Normally, only value needs to be provided.\" \" We will proceed but you", "configuration file. :param bool is_user_config: If True, the config object represents user-provided configuration.", "is None else JSON loaded from the file. \"\"\" if fname is None:", "file_name): \"\"\" Dumps *dictionary* as a json object to a file with *file_name*", "files in a directory, possibly, recursively. Find files which names satisfy *file_name_pattern* pattern", "match or query is None :rtype: bool \"\"\" if query is None: return", "True assert policy in ['relaxed', 'strict'], \"\" for field, value in query.iteritems(): if", "must_match=True, add_only_keys=None, ignore_errors=False): \"\"\" Updates *dictionary* with items from *iterable* object. This method", "dictionary of parameters info This method loads configuration files located in 'path'. If", "'strict'], \"\" for field, value in query.iteritems(): if field not in dictionary: if", "name in param_info: raise ConfigurationError( \"Parameter info update error.\" \" Parameter redefinition is", "write_json(fname, data, check_extension=False): \"\"\" Dumps *dictionary* as a json object to a file", "does not create queue and process. :param str launcher: A full path to", "must not delete file here, but create or empty it in host OS.", "update its value else: # Just parameter value val_type = 'str' if isinstance(val,", "remove_pid_file(self): \"\"\"Deletes pif file from disk.\"\"\" try: os.remove(self.pid_file) except OSError: pass def empty_pid_file(self):", "The dictionary is modified in-place. :param dict dictionary: Dictionary to check. :param str", "True try: importlib.import_module(module_name) except ImportError: logging.warn(\"Module '%s' cannot be imported, certain system information", "str fname: File name. :param boolean check_extension: If True, raises exception if fname", "% \\ (str(is_root), key, val_type, str(schema['types'])) ) # So, the type is expected.", "'strict', every key in query must exist in dictionary with the same value", "= launcher self.pid_file = os.path.join(pid_folder, 'proc.pid') self.frequency = frequency self.queue = None self.monitor_process", "set it info object. # TODO what about parameter type and description? else:", "is fixed and its value is `proc.pid`. :param float frequency: A sampling frequency", "if count == -1: metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp)) elif count == 0: metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)]) else:", "matches will be stored if match has been identified. :return: True if match", "query.iteritems(): if field not in dictionary: if policy == 'relaxed': continue else: return", "= fields_split[0] assert field_name not in self.fields,\\ \"Found duplicate timeseries field (%s)\" %", "expected. val_type = type(source[key]).__name__ if not isinstance(source[key], schema['types']): raise ConfigurationError( \"Configuration update error", "config that redefine parameters in existing param_info differently. See comments below. We are", "False if matches is not None: matches['%s_0' % (field)] = dictionary[field] else: if", "= [os.path.join(path, f) for f in files] else: config_files = [os.path.join(path, f) for", "See comments below. We are interested here only in parameters section where parameter", "if not os.path.isfile(config_file): raise ValueError(\"Configuration load error. 
Configuration data cannot be loaded for", "os.path.isdir(path_spec): files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively)) elif os.path.isfile(path_spec): files.append(path_spec) return files @staticmethod def get_non_existing_file(file_name, max_attempts", "and waits for resource monitor to finish.\"\"\" with open(self.pid_file, 'w') as fhandle: fhandle.write('exit')", "configurations from :param list files: List of file names to load. If None,", "This should not generally happen since we deal with it in update_param_info, but", "if value is an empty string if value != dictionary[field]: return False elif", "destination dictionary. dest[key] = copy.deepcopy(source[key]) else: # The key from source is in", ") param_info[name]['val'] = val['val'] # Existing parameter from user configuration, update its value", "if self.__fname.endswith('.gz'): self.__fobj = gzip.open(self.__fname, self.__flags[0]) else: self.__fobj = open(self.__fname, self.__flags[1]) return self.__fobj", "in source dictionary is not in destination dictionary. dest[key] = copy.deepcopy(source[key]) else: #", "'w') as fobj: json.dump(data, fobj, indent=4) class DictUtils(object): \"\"\"Container for dictionary helpers.\"\"\" @staticmethod", "specs # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8: self.fields = {} raw_fields = fields_specs.split(',') for raw_field in raw_fields:", "import re import logging import subprocess import importlib from multiprocessing import Process from", "16 OR 32. :param dict dictionary: Dictionary to match. :param dict query: Query", "policy == 'relaxed': continue else: return False if isinstance(value, list) or not isinstance(value,", "create or empty it in host OS. \"\"\" self.empty_pid_file() self.queue = Queue() self.monitor_process", "return True @staticmethod def ensure_exists(dictionary, key, default_value=None): \"\"\" Ensures that the dictionary *dictionary*", "not in dictionary: if policy == 'relaxed': continue else: return False if isinstance(value,", "in existing param_info differently. See comments below. We are interested here only in", "if dir_name != '' and not os.path.isdir(dir_name): os.makedirs(dir_name) @staticmethod def find_files(directory, file_name_pattern, recursively=False):", "'count': count } @staticmethod def monitor_function(launcher, pid_file, frequency, queue): \"\"\"A main monitor worker", "= isinstance(dest[key], dict) and isinstance(source[key], dict) both_lists = isinstance(dest[key], list) and isinstance(source[key], list)", "differently. See comments below. We are interested here only in parameters section where", "'.json' or '.json.gz'. :rtype: None or JSON object :return: None of fname is", "val = params[name] if isinstance(val, dict): # This should not generally happen since", "False elif matches is not None: matches['%s_0' % (field)] = dictionary[field] continue else:", "file from disk.\"\"\" try: os.remove(self.pid_file) except OSError: pass def empty_pid_file(self): \"\"\"Empty pid file.\"\"\"", "return int(str_val) elif val_type == 'float': return float(str_val) elif val_type == 'bool': v", "we match primitive types such as numbers and strings not lists or dictionaries.", "induced automatically based on JSON parse result. \"\"\" if 'parameters' not in config:", "True: candidate_file_name = \"%s.%d\" % (file_name, attempt) if not os.path.exists(candidate_file_name): return candidate_file_name attempt", "in query are lists, then condition OR applies. 
For instance: match(dictionary, query =", "name, param_info[name] ) @staticmethod def remove_info(config): \"\"\"In parameter section of a **config** the", "directories. :return: List of file names satisfying *file_name_pattern* pattern. \"\"\" files = []", "\"Found duplicate timeseries field (%s)\" % field_name field_type = fields_split[1] assert field_type in", "'int', 'float', 'bool')\" % field_type index = int(fields_split[2]) if len(fields_split) == 3: count", "path to folder where pid file is created. The file name is fixed", "index = int(fields_split[2]) if len(fields_split) == 3: count = -1 elif fields_split[3] ==", "have_module class Modules(object): \"\"\"A class that enumerates non-standard python modules this project depends", "name of a module to try to import, something like 'numpy', 'pandas', 'matplotlib'", "object represents user-provided configuration. If False, this is a system configuration. Based on", "@staticmethod def subdict(dictionary, keys): \"\"\"Return subdictionary containing only keys from 'keys'. :param dict", "\"\"\"Update parameter info dictionary based on configurationi in **config** :param dict param_info: A", "key, str(value)) except ValueError as err: if not ignore_errors: raise ConfigurationError(\"Cannot parse JSON", "fname: File name. :param boolean check_extension: If True, raises exception if fname does", "dictionaries. If policy is 'strict', every key in query must exist in dictionary", "both_dicts = isinstance(dest[key], dict) and isinstance(source[key], dict) both_lists = isinstance(dest[key], list) and isinstance(source[key],", "str(value)) except ValueError as err: if not ignore_errors: raise ConfigurationError(\"Cannot parse JSON string", "('true', 1, 'on') else: assert False, \"Invalid value type %s\" % val_type def", "(field)] = dictionary[field] else: if value == '': # Take special care if", "redefinition. if name in param_info: raise ConfigurationError( \"Parameter info update error.\" \" Parameter", "import gzip import re import logging import subprocess import importlib from multiprocessing import", "isinstance(val, dict): # This is a complete parameter definition with name, value and", "those items are added to *dictionary*, that are in this list. Existing items", "file_name_pattern, recursively=False): \"\"\"Find files in a directory, possibly, recursively. Find files which names", "config_section, is_user_config=False) # Joing configuration from this single file. ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section)) except ValueError:", "info update error.\" \" Parameter has invalid type = '%s'.\" \" Parameter definition", "definition is %s = %s\" % (param_info[name]['type'], name, param_info[name]) ) if 'type' not", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "OS. \"\"\" self.empty_pid_file() self.queue = Queue() self.monitor_process = Process( target=ResourceMonitor.monitor_function, args=(self.launcher, self.pid_file, self.frequency,", "contains all keys in 'keys' :param dict dictionary: Input dictionary. :param list_or_val keys:", "for non-user configuration.\" \" This is a system configuration error that must not", "None: break if output: # The 'output' is a string printed out by", "serialize. :param any data: A data to dump into a JSON file. :param", "assert field_name not in self.fields,\\ \"Found duplicate timeseries field (%s)\" % field_name field_type", "dest: # The key in source dictionary is not in destination dictionary. 
dest[key]", "items which keys names start with *prefix*. \"\"\" return_dictionary = {} for key", "# The key from source is in dest. both_dicts = isinstance(dest[key], dict) and", "[os.path.join(path, f) for f in os.listdir(path) if f.endswith('.json')] config = {} # Configuration", "\"\"\"Container for dictionary helpers.\"\"\" @staticmethod def subdict(dictionary, keys): \"\"\"Return subdictionary containing only keys", "For instance, is can be '*.log' :param bool recursively: If True, search in", "parent folder of this file exists. The file itself may not exist. A", "True if module has been imported, False otherwise. \"\"\" have_module = True try:", "that are in this list. Existing items in *dictionary* are overwritten with new", "ignore_errors: raise ConfigurationError(\"Cannot parse JSON string '%s' with key '%s' (key-value definition: '%s').", "= {'types':(list, basestring, int, float, long)} for key in source: # Firstly, check", "param_info[name]) ) if 'type' not in param_info[name] or 'desc' not in param_info[name]: logging.warn(", "type(dest[key]), type(source[key]), '[dict, list]') if both_dicts: ConfigurationLoader.update(dest[key], source[key], is_root=False) else: dest[key].extend(source[key]) else: if", "If key does not exist, it adds a new item with value *default_value*.", "info removed \"\"\" clean_config = copy.deepcopy(config) if 'parameters' in clean_config: params = clean_config['parameters']", "\"\"\"Loads experimenter configuration from multiple files.\"\"\" @staticmethod def load(path, files=None): \"\"\"Loads configurations (normally", "A list of file names / directories. :param str file_name_pattern: A file name", "not exist, it will be created. See documentation for :py:func:`os.makedirs` for more details.", "is not a json-parseable string. \"\"\" matcher = re.compile(pattern) for line in iterable:", ":param list add_only_keys: If not None, specifies keys that are added into\\ *dictionary*.", "JSON loaded from the file. \"\"\" if fname is None: return None if", "not both_lists: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict, list]') if both_dicts: ConfigurationLoader.update(dest[key], source[key], is_root=False) else:", "open(self.__fname, self.__flags[1]) return self.__fobj def __exit__(self, type, value, traceback): self.__fobj.close() class IOUtils(object): \"\"\"Container", "exist, it adds a new item with value *default_value*. The dictionary is modified", ":return: Dictionary that maps metric field to a time series of its value.", "files. :param dict dictionary: Dictionary to update in-place. :param obj iterable: Iterable object", "seconds. Can be something like 0.1 seconds :param multiprocessing.Queue queue: A queue to", "OR contains\\ key 'framework' with value \"tensorflow\". match(dictionary, query = { \"framework\": [\"tensorflow\",", "match. :param dict query: Query to use. :param ['relaxed', 'strict'] policy: Policy to", "be one of ('str', 'int', 'float', 'bool')\" % field_type index = int(fields_split[2]) if", "== 'int': return int(str_val) elif val_type == 'float': return float(str_val) elif val_type ==", "Load user-provided configuration. 
In this case, we still update parameter info structure, but", "to be provided.\" \" We will proceed but you may want to fix", "\"%s.%d\" % (file_name, attempt) if not os.path.exists(candidate_file_name): return candidate_file_name attempt += 1 if", "\"Cannot find non existing file from pattern %s\" raise ValueError(msg % file_name) @staticmethod", "queue to communicate measurements. A resource monitor is launched as a subprocess. The", "for f in os.listdir(path) if f.endswith('.json')] config = {} # Configuration with params/vars/extensions", "key in self.fields.keys(): metrics[key] = [] # What's in output: # proc_pid date", "True, *ConfigurationError* exception is thrown. Regexp pattern must return two groups (1 and", "class OpenFile(object): \"\"\"Class that can work with gzipped and regular textual files.\"\"\" def", "subfolders as well. :param str directory: A directory to search files in. :param", "'parameters' not in config: return params = config['parameters'] for name in params: val", "ValueError(\"Configuration load error. The 'path' parameter cannot be None.\") if not os.path.isdir(path): raise", "in-place. :param dict dictionary: Dictionary to check. :param str key: A key that", "'framework' with value \"tensorflow\" OR\\ \"caffe2\". match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"],", "dictionary[field] not in values: return False if matches is not None: matches['%s_0' %", "check_extension=False): \"\"\"Reads JSON object from file 'fname'. :param str fname: File name. :param", "\"\"\"A private class that imports a particular models and return boolean variable indicating", "def __enter__(self): if self.__fname.endswith('.gz'): self.__fobj = gzip.open(self.__fname, self.__flags[0]) else: self.__fobj = open(self.__fname, self.__flags[1])", "if not is_user_config: # If this is not a user-provided configuration, we disallow", "both_lists and not both_primitive: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list, basestring, int, float, long]') dest[key]", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "source: # Firstly, check that type of value is expected. val_type = type(source[key]).__name__", "contain all keys from query to be matched. In this case, the intersection", "import has been succesfull or not. Used by a Modules class to identify", "If *must_match* is True and not match or if value\\ is not a", "value. Found this definition: %s=%s\" % (name, val) ) params[name] = val['val'] return", "schema[val_type]: logging.warn(\"The name of a root key is '%s' but expected is one", "('str', 'int', 'float', 'bool')\" % field_type index = int(fields_split[2]) if len(fields_split) == 3:", "into a JSON file. :param str file_name: Name of a file to serialie", "match or if value\\ is not a json-parseable string. \"\"\" matcher = re.compile(pattern)", "src_val_type.__name__) ) # Types and expected key names. Types must always match, else", "}, policy='relaxed') Match dictionary if it does not contain key 'framework' OR contains\\", "name that does not exist. :param str file_name: Input file name. 
:rtype: str", "user configuration, update its value else: # Just parameter value val_type = 'str'", "match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"] }, policy='strict') Match dictionary only if", "remove_prefix else key return_dictionary[return_key] = copy.deepcopy(dictionary[key]) return return_dictionary @staticmethod def dump_json_to_file(dictionary, file_name): \"\"\"", "= self.fields[field]['type'] idx = self.fields[field]['index'] count = self.fields[field]['count'] if count == -1: metrics[field].append(ResourceMonitor.str_to_type(data[idx],", ":rtype: bool \"\"\" if query is None: return True assert policy in ['relaxed',", "valid_types): raise ConfigurationError( \"Configuration update error - expecting value types to be same", "= dictionary[field] for index, group in enumerate(match.groups()): matches['%s_%d' % (field, index+1)] = group", "32. :param dict dictionary: Dictionary to match. :param dict query: Query to use.", "fixed and its value is `proc.pid`. :param float frequency: A sampling frequency in", "match primitive types such as numbers and strings not lists or dictionaries. If", "empty it in host OS. \"\"\" self.empty_pid_file() self.queue = Queue() self.monitor_process = Process(", ":raises ConfigurationError: If *must_match* is True and not match or if value\\ is", "Development LP # # Licensed under the Apache License, Version 2.0 (the \"License\");", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "for root. if is_root and key not in schema[val_type]: logging.warn(\"The name of a", "% field_type index = int(fields_split[2]) if len(fields_split) == 3: count = -1 elif", "be available\", module_name) have_module = False return have_module class Modules(object): \"\"\"A class that", "['wb', 'w'] def __enter__(self): if self.__fname.endswith('.gz'): self.__fobj = gzip.open(self.__fname, self.__flags[0]) else: self.__fobj =", "(a) contains key 'framework' with value \"tensorflow\" OR \"caffe2\"\\ and (b) it contains", "only in parameters section where parameter information is defined. There are two scenarios", "It's a whitespace separated string of numbers. queue.put(output.strip()) @staticmethod def str_to_type(str_val, val_type): if", "module_name: A name of a module to try to import, something like 'numpy',", "this case, parameter redefinition is prohibited. If `parameters` section in `config` redefines existing", "else source[key] class ResourceMonitor(object): \"\"\"The class is responsible for launching/shutting down/communicating with external", "1, 'on') else: assert False, \"Invalid value type %s\" % val_type def get_measurements(self):", "= copy.deepcopy(source[key]) else: # The key from source is in dest. both_dicts =", "\"\"\" return_dictionary = {} for key in dictionary: if key.startswith(prefix): return_key = key[len(prefix):]", "IOUtils.mkdirf(fname) with OpenFile(fname, 'w') as fobj: json.dump(data, fobj, indent=4) class DictUtils(object): \"\"\"Container for", "val, 'type': val_type, 'desc': \"No description for this parameter provided (it was automatically", "keys names starts with *prefix*. If *remove_prefix* is True, keys in new dictionary", "value in query.iteritems(): if field not in dictionary: if policy == 'relaxed': continue", "intended behaviour for now (this also applies for update_param_info method). 
\"\"\" if path", "= {'types':(dict, list), 'dict':['parameters', 'variables'], 'list':['extensions']} else: schema = {'types':(list, basestring, int, float,", "os.path.isdir(path): raise ValueError(\"Configuration load error. The 'path' parameter (%s) must point to an", "fhandle.write('exit') self.queue.close() self.queue.join_thread() self.monitor_process.join() self.remove_pid_file() class _ModuleImporter(object): \"\"\"A private class that imports a", "add_only_keys: dictionary[key] = value logging.debug(\"Key-value item (%s=%s) has been parsed and added to", "val_type == 'str': return str_val elif val_type == 'int': return int(str_val) elif val_type", "imported, certain system information will not be available\", module_name) have_module = False return", "str path: Path to load configurations from :param list files: List of file", "parameter (%s).\" \" Normally, only value needs to be provided.\" \" We will", "file_name) @staticmethod def check_file_extensions(fname, extensions): \"\"\"Checks that fname has one of the provided", "it adds a new item with value *default_value*. The dictionary is modified in-place.", "dir_name != '' and not os.path.isdir(dir_name): os.makedirs(dir_name) @staticmethod def find_files(directory, file_name_pattern, recursively=False): \"\"\"Find", "None: raise ValueError(\"Configuration load error. The 'path' parameter cannot be None.\") if not", "remove prefix in returned dictionary. :return: New dictionary with items which keys names", "@staticmethod def get_non_existing_file(file_name, max_attempts = 1000): \"\"\"Return file name that does not exist.", "tuple extensions: A tuple of extensions to use. Raises exception of fname does", "does not end with one of the extensions. \"\"\" if fname is None:", "= dictionary[field] else: if value == '': # Take special care if value", "None or key in add_only_keys: dictionary[key] = value logging.debug(\"Key-value item (%s=%s) has been", "dictionary: Input dictionary. :param list_or_val keys: Keys to find in dictionary :rtype: boolean", "file name etc). :param str patter: A regexp pattern for matching items in", "def ensure_exists(dictionary, key, default_value=None): \"\"\" Ensures that the dictionary *dictionary* contains key *key*", "return False elif matches is not None: matches['%s_0' % (field)] = dictionary[field] continue", "Must match is set to true\" % (line, pattern)) else: continue key =", "is prohibited. If `parameters` section in `config` redefines existing parameters in param_info (already", "like 0.1 seconds :param multiprocessing.Queue queue: A queue to communicate measurements. A resource", "default value. Found this definition: %s=%s\" % (name, val) ) params[name] = val['val']", "time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8- :return: Dictionary that maps metric field to a time series of its", "json-parseable string. \"\"\" matcher = re.compile(pattern) for line in iterable: match = matcher.match(line)", "Match *query* against *dictionary*. The *query* and *dictionary* are actually dictionaries. If policy", "of keys in dictionary and query is used for matching. It's assuemd we", "dict dictionary: Dictionary to update in-place. :param obj iterable: Iterable object (list, opened", "val['val'] return clean_config @staticmethod def update(dest, source, is_root=True): \"\"\"Merge **source** dictionary into **dest**", "frequency, fields_specs): \"\"\"Initializes resource monitor but does not create queue and process. 
:param", "in self.fields.keys(): metrics[key] = [] # What's in output: # proc_pid date virt", "particular models and return boolean variable indicating if import has been succesfull or", ":param float frequency: A sampling frequency in seconds. Can be something like 0.1", "def remove_info(config): \"\"\"In parameter section of a **config** the function removes parameter info", "or if value\\ is not a json-parseable string. \"\"\" matcher = re.compile(pattern) for", "policy='relaxed', matches=None): \"\"\" Match *query* against *dictionary*. The *query* and *dictionary* are actually", "of user defined parameters are defined either by user in a standard way", "%s = %s\" % (param_info[name]['type'], name, param_info[name]) ) if 'type' not in param_info[name]", "@staticmethod def filter_by_key_prefix(dictionary, prefix, remove_prefix=True): \"\"\"Creates new dictionary with items which keys start", "if check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) IOUtils.mkdirf(fname) with OpenFile(fname, 'w') as fobj: json.dump(data, fobj,", "One use case to use this method is to populate a dictionary with", "is not None: matches['%s_0' % (field)] = dictionary[field] for index, group in enumerate(match.groups()):", "of a file to serialie dictionary in. \"\"\" if file_name is not None:", "result. \"\"\" if 'parameters' not in config: return params = config['parameters'] for name", "'variables'], 'list':['extensions']} else: schema = {'types':(list, basestring, int, float, long)} for key in", "isinstance(dest[key], list) and isinstance(source[key], list) both_primitive = type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring,", "val) ) if isinstance(val, dict): # This is a complete parameter definition with", "as type and help messages for config_file in config_files: if not os.path.isfile(config_file): raise", "\"tensorflow\" }, policy='strict') Match dictionary only if it contains key 'framework' with value", ":param dict param_info: A parameter info dictionary that maps parameter name to its", "os.path.exists(candidate_file_name): return candidate_file_name attempt += 1 if attempt >= max_attempts: msg = \"Cannot", "that maps parameter name to its description dictionary that contains such fileds as", "\" Parameter redefinition is not allowed for non-user configuration.\" \" This is a", "\" Normally, only value needs to be provided.\" \" We will proceed but", "to load. If None, all files with JSON extension in **path** are loaded.", "# TODO what about parameter type and description? else: logging.warn( \" Parameter (%s)", "but you may want to fix this.\", json.dumps(val), json.dumps(param_info[name]) ) param_info[name]['val'] = val['val']", "\" is_root=%s, key=%s, value type=%s, expected type is one of %s\" % \\", "self.monitor_process.start() def stop(self): \"\"\"Closes queue and waits for resource monitor to finish.\"\"\" with", "value = json.loads(value) if len(value) > 0 else None if add_only_keys is None", "= ['rb', 'r'] if mode == 'r' else ['wb', 'w'] def __enter__(self): if", "'index': index, 'count': count } @staticmethod def monitor_function(launcher, pid_file, frequency, queue): \"\"\"A main", "recursively: If True, search in subdirectories. Only used for entries in path_specs that", "source and dest are JSON configuration configs or their members. :param dict dest:", "matches is not None: matches['%s_0' % (field)] = dictionary[field] continue else: match =", "is intended behaviour for now (this also applies for update_param_info method). 
\"\"\" if", "if add_only_keys is None or key in add_only_keys: dictionary[key] = value logging.debug(\"Key-value item", "('type') and/or description ('desc').\" \" You should fix this. Parameter definition is\" \"", "part of global configuration from this particular file config_section = json.load(file_obj) # Update", "is None: return True keys = keys if isinstance(keys, list) else [keys] for", "or not. Used by a Modules class to identify if optional python modules", "{} # Configuration with params/vars/extensions param_info = {} # Information on params such", "files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively)) elif os.path.isfile(path_spec): files.append(path_spec) return files @staticmethod def get_non_existing_file(file_name, max_attempts =", "and its value is `proc.pid`. :param float frequency: A sampling frequency in seconds.", "re import logging import subprocess import importlib from multiprocessing import Process from multiprocessing", "str file_name_pattern: A file name pattern to search. For instance, is can be", "0.1 seconds :param multiprocessing.Queue queue: A queue to communicate measurements. A resource monitor", "in add_only_keys: dictionary[key] = value logging.debug(\"Key-value item (%s=%s) has been parsed and added", "strings. For every item in *dictionary*, if type of a value is 'list',", "configuration from multiple files.\"\"\" @staticmethod def load(path, files=None): \"\"\"Loads configurations (normally in `conigs`)", "not exist. \"\"\" if not os.path.exists(file_name): return file_name attempt = 0 while True:", "loaded for not a file (%s)\" % config_file) with open(config_file) as file_obj: try:", "seconds. Can be something like 0.1 seconds \"\"\" self.launcher = launcher self.pid_file =", "configuration objects. False if these objects are members. \"\"\" def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type,", "a resource monitor # script. It's a whitespace separated string of numbers. queue.put(output.strip())", "'w'] def __enter__(self): if self.__fname.endswith('.gz'): self.__fobj = gzip.open(self.__fname, self.__flags[0]) else: self.__fobj = open(self.__fname,", "if it contains key 'framework' with value \"tensorflow\" OR\\ \"caffe2\". match(dictionary, query =", "= keys if isinstance(keys, list) else [keys] for key in keys: if key", "object. # TODO what about parameter type and description? else: logging.warn( \" Parameter", "dest_val_type, src_val_type, valid_types): raise ConfigurationError( \"Configuration update error - expecting value types to", "for not a file (%s)\" % config_file) with open(config_file) as file_obj: try: #", "else: # The key from source is in dest. both_dicts = isinstance(dest[key], dict)", "= value if isinstance(value, list) else [value] if dictionary[field] not in values: return", "type(source[key]) and isinstance(dest[key], (basestring, int, float, long)) if is_root: if not both_dicts and", "str patter: A regexp pattern for matching items in ``iterable``. :param bool must_match:", "config_files = [os.path.join(path, f) for f in files] else: config_files = [os.path.join(path, f)", "for standard parameters or induced automatically based on JSON parse result. \"\"\" if", "time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8: self.fields = {} raw_fields = fields_specs.split(',') for raw_field in raw_fields: fields_split =", "not exist else find first file name that file does not exist. 
\"\"\"", "error.\" \" Parameter redefinition is not allowed for non-user configuration.\" \" This is", "'w') as file_obj: json.dump(dictionary, file_obj, indent=4) @staticmethod def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None,", "attempt = 0 while True: candidate_file_name = \"%s.%d\" % (file_name, attempt) if not", "= Queue() self.monitor_process = Process( target=ResourceMonitor.monitor_function, args=(self.launcher, self.pid_file, self.frequency, self.queue) ) self.monitor_process.start() def", "self.__fname = fname self.__flags = ['rb', 'r'] if mode == 'r' else ['wb',", "from multiprocessing import Queue from glob import glob from dlbs.exceptions import ConfigurationError class", "list files: List of file names to load. If None, all files with", "update_param_info(param_info, config, is_user_config=False): \"\"\"Update parameter info dictionary based on configurationi in **config** :param", "in. :param str file_name_pattern: A file name pattern to search. For instance, is", "fix this.\", json.dumps(val), json.dumps(param_info[name]) ) param_info[name]['val'] = val['val'] # Existing parameter from user", "definition is\" \" %s = %s\", name, param_info[name] ) @staticmethod def remove_info(config): \"\"\"In", "in destination dictionary. dest[key] = copy.deepcopy(source[key]) else: # The key from source is", "are added to *dictionary*, that are in this list. Existing items in *dictionary*", "long]') dest[key] = copy.deepcopy(source[key]) if both_lists else source[key] class ResourceMonitor(object): \"\"\"The class is", "considered, that match *pattern* (it's a regexp epression). If a particular item does", "regular textual files.\"\"\" def __init__(self, fname, mode='r'): self.__fname = fname self.__flags = ['rb',", "provided their specific value for this parameter. Types of user defined parameters are", "key, default_value=None): \"\"\" Ensures that the dictionary *dictionary* contains key *key* If key", "%s\" % (name, str(param_info[name]), val) ) if isinstance(val, dict): # This is a", "not use this file except in compliance with the License. # You may", "= { \"framework\": \"tensorflow\" }, policy='strict') Match dictionary only if it contains key", "pid into pid file. :param int pid: A pid to write. This is", "is None or key in add_only_keys: dictionary[key] = value logging.debug(\"Key-value item (%s=%s) has", "self.frequency = frequency self.queue = None self.monitor_process = None # Parse fields specs", "etc). :param str patter: A regexp pattern for matching items in ``iterable``. :param", ":return: The 'file_name' if this file does not exist else find first file", "\"\"\"Reads JSON object from file 'fname'. :param str fname: File name. :param boolean", "or JSON object :return: None of fname is None else JSON loaded from", "stored if match has been identified. 
:return: True if match or query is", "dictionary: if policy == 'relaxed': continue else: return False if isinstance(value, list) or", "dictionary[field] else: if value == '': # Take special care if value is", "defined parameters are defined either by user in a standard way as we", "gzipped and regular textual files.\"\"\" def __init__(self, fname, mode='r'): self.__fname = fname self.__flags", "raise ConfigurationError( \"Parameter info update error.\" \" Parameter that is defined by a", "that is defined by a dictionary must contain 'val' field that\" \" defines", "this file does not exist else find first file name that file does", "pattern)) else: continue key = match.group(1).strip() try: value = match.group(2).strip() value = json.loads(value)", "this parameter provided (it was automatically converted from its value).\" } else: param_info[name]['val']", "and process. :param str launcher: A full path to resource monitor script. :param", "file with *file_name* name. :param dict dictionary: Dictionary to serialize. :param any data:", "in new dictionary will not contain this prefix. The dictionary *dictionary* is not", "optional, so we can disable certain functionality if something is missing. \"\"\" HAVE_NUMPY", "case, we still update parameter info structure, but deal with it in slightly", "and dictionary of parameters info This method loads configuration files located in 'path'.", "else exception is thrown. if is_root: schema = {'types':(dict, list), 'dict':['parameters', 'variables'], 'list':['extensions']}", "raise ValueError(\"Configuration load error. Configuration data cannot be loaded for not a file", "for name in params: val = params[name] if not is_user_config: # If this", "file 'fname'. :param str fname: File name. :param boolean check_extension: If True, raises", "if is_root: schema = {'types':(dict, list), 'dict':['parameters', 'variables'], 'list':['extensions']} else: schema = {'types':(list,", "str :return: The 'file_name' if this file does not exist else find first", "frequency, queue): \"\"\"A main monitor worker function. :param str launcher: A full path", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "None: matches['%s_0' % (field)] = dictionary[field] continue else: match = re.compile(value).match(dictionary[field]) if not", "disallow parameter redefinition. if name in param_info: raise ConfigurationError( \"Parameter info update error.\"", "``iterable``. :param bool must_match: Specifies if every element in *iterable* must match\\ *pattern*.", "def run(self): \"\"\"Create queue and start resource monitor in background thread. Due to", "if output == '' and process.poll() is not None: break if output: #", "'output' is a string printed out by a resource monitor # script. It's", "parameter definition (value) is %s\" % (name, str(param_info[name]), val) ) if isinstance(val, dict):", "what about parameter type and description? else: logging.warn( \" Parameter (%s) entirely redefines", "dict dictionary: Dictionary to match. :param dict query: Query to use. :param ['relaxed',", "str(frequency) ] process = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: output = process.stdout.readline()", "debugging function and most likely should not be used. \"\"\" with open(self.pid_file, 'w')", "be provided.\" \" We will proceed but you may want to fix this.\",", "\"\"\"Checks that fname has one of the provided extensions. :param str fname: The", "folder does not exist, it will be created. 
See documentation for :py:func:`os.makedirs` for", "long)) if is_root: if not both_dicts and not both_lists: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict,", "agreed to in writing, software # distributed under the License is distributed on", "parameters section where parameter information is defined. There are two scenarios this method", "val_type=%s)\" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__) ) # Types and expected key", "types to be same and one of %s but\" \" Dest(key=%s, val_type=%s) <-", "may not contain all keys from query to be matched. In this case,", "dictionary[field] continue else: match = re.compile(value).match(dictionary[field]) if not match: return False else: if", "provided (it was automatically converted from its value).\" } else: param_info[name]['val'] = val", "entirely redefines existing parameter (%s).\" \" Normally, only value needs to be provided.\"", "parameter information is defined. There are two scenarios this method is used: 1.", "of **config** with info removed \"\"\" clean_config = copy.deepcopy(config) if 'parameters' in clean_config:", "output: # proc_pid date virt res shrd cpu mem power gpus_power while not", "in `config` exists in param_info, it means user has provided their specific value", "configurations (normally in `conigs`) folder. :param str path: Path to load configurations from", "with open(self.pid_file, 'w'): pass except IOError: pass def write_pid_file(self, pid): \"\"\"Write the pid", "Modules class to identify if optional python modules are available. \"\"\" @staticmethod def", "has provided their specific value for this parameter. Types of user defined parameters", "happen.\" \" Parameter %s=%s, new parameter definition (value) is %s\" % (name, str(param_info[name]),", "% file_name) @staticmethod def check_file_extensions(fname, extensions): \"\"\"Checks that fname has one of the", "'desc' not in param_info[name]: logging.warn( \"Parameter definition does not contain type ('type') and/or", "in update_param_info, but just in case if 'val' not in val: raise ConfigurationError(", "in xrange(idx, idx+count) ]) return metrics def remove_pid_file(self): \"\"\"Deletes pif file from disk.\"\"\"", "or induced automatically based on JSON parse result. \"\"\" if 'parameters' not in", "not os.path.isfile(config_file): raise ValueError(\"Configuration load error. Configuration data cannot be loaded for not", "either by user in a standard way as we define types for standard", "\"\"\"Write the pid into pid file. :param int pid: A pid to write.", "path_specs that are directories. :return: List of file names satisfying *file_name_pattern* pattern. \"\"\"", "dictionary in. \"\"\" if fname is None: raise ValueError(\"File name is None\") if", "a file to serialie dictionary in. \"\"\" if file_name is not None: IOUtils.mkdirf(file_name)", "def __init__(self, launcher, pid_folder, frequency, fields_specs): \"\"\"Initializes resource monitor but does not create", "raise ConfigurationError( \"Configuration update error - expecting value types to be same and", "stderr=subprocess.STDOUT) while True: output = process.stdout.readline() if output == '' and process.poll() is", "names satisfy *file_name_pattern* pattern in folder *directory*. 
If *recursively* is True, scans subfolders", "(c) Copyright [2017] Hewlett Packard Enterprise Development LP # # Licensed under the", "dictionary: dictionary[key] = copy.deepcopy(default_value) @staticmethod def lists_to_strings(dictionary, separator=' '): \"\"\" Converts every value", "that parent folder of this file exists. The file itself may not exist.", "__exit__(self, type, value, traceback): self.__fobj.close() class IOUtils(object): \"\"\"Container for input/output helpers\"\"\" @staticmethod def", "self.frequency, self.queue) ) self.monitor_process.start() def stop(self): \"\"\"Closes queue and waits for resource monitor", "dlbs.exceptions import ConfigurationError class OpenFile(object): \"\"\"Class that can work with gzipped and regular", "\"tensorflow\". match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"] }, policy='strict') Match dictionary only", "Found this definition: %s=%s\" % (name, val) ) params[name] = val['val'] return clean_config", "configs or their members. :param dict dest: Merge data to this dictionary. :param", "*query* and *dictionary* are actually dictionaries. If policy is 'strict', every key in", "key 'framework' with value \"tensorflow\". match(dictionary, query = { \"framework\": \"tensorflow\" }, policy='relaxed')", "def dump_json_to_file(dictionary, file_name): \"\"\" Dumps *dictionary* as a json object to a file", "into **dest** dictionary assuming source and dest are JSON configuration configs or their", "value is expected. val_type = type(source[key]).__name__ if not isinstance(source[key], schema['types']): raise ConfigurationError( \"Configuration", "IOError: pass def write_pid_file(self, pid): \"\"\"Write the pid into pid file. :param int", "key 'batch' with value 16 OR 32. :param dict dictionary: Dictionary to match.", "\"\"\" if fname is None: return assert isinstance(extensions, tuple), \"The 'extensions' must be", "keys: Keys to find in dictionary :rtype: boolean :return: True if all keys", "by a resource monitor # script. It's a whitespace separated string of numbers.", "its value).\" } else: param_info[name]['val'] = val # Do final validations if 'type'", "is type(source[key]) and isinstance(dest[key], (basestring, int, float, long)) if is_root: if not both_dicts", "1 if attempt >= max_attempts: msg = \"Cannot find non existing file from", "obj iterable: Iterable object (list, opened file name etc). :param str patter: A", "= None self.monitor_process = None # Parse fields specs # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8: self.fields =", "the specific language governing permissions and # limitations under the License. \"\"\"Two classes", "this dictionary. :param bool is_root: True if **dest** and *source** are root configuration", ":param str prefix: Prefix of keys to be extracted. :param bool remove_prefix: If", "*dictionary*. Others are ignored. :param boolean ignore_erros: If true, ignore errors. :raises ConfigurationError:", "str pid_folder: A full path to folder where pid file is created. The", "if remove_prefix else key return_dictionary[return_key] = copy.deepcopy(dictionary[key]) return return_dictionary @staticmethod def dump_json_to_file(dictionary, file_name):", "from glob import glob from dlbs.exceptions import ConfigurationError class OpenFile(object): \"\"\"Class that can", "contains\\ key 'framework' with value \"tensorflow\". 
match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"]", "# Parse fields specs # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8: self.fields = {} raw_fields = fields_specs.split(',') for", "file name is fixed and its value is `proc.pid`. :param float frequency: A", "key, dest_val_type.__name__, key, src_val_type.__name__) ) # Types and expected key names. Types must", "JSON configuration file. :param bool is_user_config: If True, the config object represents user-provided", "'float', 'bool')\" % field_type index = int(fields_split[2]) if len(fields_split) == 3: count =", "converts this list into a string using separator *separator*. The dictictionary is modified", "does not exist else find first file name that file does not exist.", "@staticmethod def remove_info(config): \"\"\"In parameter section of a **config** the function removes parameter", "to in writing, software # distributed under the License is distributed on an", "match: if must_match: raise ConfigurationError(\"Cannot match key-value from '%s' with pattern '%s'. Must", "raise ConfigurationError(\"Cannot parse JSON string '%s' with key '%s' (key-value definition: '%s'). Error", ":rtype: boolean :return: True if all keys are in dictionary or keys is", "# This should not generally happen since we deal with it in update_param_info,", "obj default_value: Default value for key if it does not exist. \"\"\" if", "implied. # See the License for the specific language governing permissions and #", "Parameter that is defined by a dictionary must contain 'val' field that\" \"", "with *file_name* name. :param dict dictionary: Dictionary to serialize. :param any data: A", "be stored if match has been identified. :return: True if match or query", "if output: # The 'output' is a string printed out by a resource", "file is created. The file name is fixed and its value is `proc.pid`.", "val = params[name] if not is_user_config: # If this is not a user-provided", "key 'framework' OR contains\\ key 'framework' with value \"tensorflow\". match(dictionary, query = {", "bool is_user_config: If True, the config object represents user-provided configuration. If False, this", "file does not exist else find first file name that file does not", "parameter section of a **config** the function removes parameter info leaving only their", "that maps metric field to a time series of its value. \"\"\" metrics", "**dest** dictionary assuming source and dest are JSON configuration configs or their members.", "If a particular item does not match, and *must_match* is True, *ConfigurationError* exception", "with items which keys names start with *prefix*. \"\"\" return_dictionary = {} for", "['relaxed', 'strict'] policy: Policy to match. :param dict matches: Dictionary where matches will", "'%s' (key-value definition: '%s'). Error is %s\" % (value, key, line, str(err))) @staticmethod", "are loaded. :return: A tuple consisting of a list of config files, configuration", "want to fix this.\", json.dumps(val), json.dumps(param_info[name]) ) param_info[name]['val'] = val['val'] # Existing parameter", "% (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__) ) # Types and expected key names.", "in ('int', 'str', 'float', 'bool'): raise ConfigurationError( \"Parameter info update error.\" \" Parameter", "dictionary return dict((k, dictionary[k]) for k in keys if k in dictionary) @staticmethod", "Types must always match, else exception is thrown. 
if is_root: schema = {'types':(dict,", "in os.listdir(path) if f.endswith('.json')] config = {} # Configuration with params/vars/extensions param_info =", "is not None: break if output: # The 'output' is a string printed", "maps metric field to a time series of its value. \"\"\" metrics =", "keys): \"\"\"Return subdictionary containing only keys from 'keys'. :param dict dictionary: Input dictionary.", "config = {} # Configuration with params/vars/extensions param_info = {} # Information on", "'numpy', 'pandas', 'matplotlib' etc. :return: True if module has been imported, False otherwise.", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "loaded from the file. \"\"\" if fname is None: return None if check_extension:", "(name, val) ) params[name] = val['val'] return clean_config @staticmethod def update(dest, source, is_root=True):", "to ensure that we can write to this file. If path to parent", "extracted. :param bool remove_prefix: If True, remove prefix in returned dictionary. :return: New", "str_to_type(str_val, val_type): if val_type == 'str': return str_val elif val_type == 'int': return", "for input/output helpers\"\"\" @staticmethod def mkdirf(file_name): \"\"\"Makes sure that parent folder of this", "val_type=%s) <- Source(key=%s, val_type=%s)\" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__) ) # Types", "keys = keys if isinstance(keys, list) else [keys] for key in keys: if", "JSON object :return: None of fname is None else JSON loaded from the", "ConfigurationError(\"Cannot match key-value from '%s' with pattern '%s'. Must match is set to", "raise ValueError(\"Invalid file extension (%s). Must be one of %s\" % extensions) @staticmethod", "TODO what about parameter type and description? else: logging.warn( \" Parameter (%s) entirely", "only if it contains key 'framework' with value \"tensorflow\". match(dictionary, query = {", "timeseries field (%s)\" % field_name field_type = fields_split[1] assert field_type in ('str', 'int',", "as a subprocess. The thread is reading its output and will put the", "be created. See documentation for :py:func:`os.makedirs` for more details. :param str file_name: A", "launcher self.pid_file = os.path.join(pid_folder, 'proc.pid') self.frequency = frequency self.queue = None self.monitor_process =", "help messages for config_file in config_files: if not os.path.isfile(config_file): raise ValueError(\"Configuration load error.", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "json.load(file_obj) # Update parameters info. ConfigurationLoader.update_param_info(param_info, config_section, is_user_config=False) # Joing configuration from this", "fname does not end with one of the extensions. \"\"\" if fname is", "In this case, parameter redefinition is prohibited. If `parameters` section in `config` redefines", "a json-parseable strings. If *add_only_keys* is not None, only those items are added", "match(dictionary, query, policy='relaxed', matches=None): \"\"\" Match *query* against *dictionary*. The *query* and *dictionary*", "dictionary in. \"\"\" if file_name is not None: IOUtils.mkdirf(file_name) with open(file_name, 'w') as", "str file_name: Name of a file to serialie dictionary in. \"\"\" if fname", "not a user-provided configuration, we disallow parameter redefinition. 
class IOUtils(object):
    """Container for input/output helpers."""

    @staticmethod
    def mkdirf(file_name):
        """Makes sure that the parent folder of this file exists.

        The file itself may not exist. A typical usage is to ensure that we can
        write to this file. If the path to the parent folder does not exist, it
        will be created. See documentation for :py:func:`os.makedirs` for more
        details.

        :param str file_name: A name of the file for which we want to make sure
                              its parent directory exists.
        """
        dir_name = os.path.dirname(file_name)
        if dir_name != '' and not os.path.isdir(dir_name):
            os.makedirs(dir_name)

    @staticmethod
    def find_files(directory, file_name_pattern, recursively=False):
        """Find files in a directory, possibly recursively.

        Find files whose names satisfy *file_name_pattern* in folder
        *directory*. If *recursively* is True, subfolders are scanned as well.

        :param str directory: A directory to search files in.
        :param str file_name_pattern: A file name pattern to search. For
                                      instance, it can be '*.log'.
        :param bool recursively: If True, search in subdirectories.
        :return: List of file names satisfying *file_name_pattern* pattern.
        """
        if not recursively:
            files = [f for f in glob(os.path.join(directory, file_name_pattern))]
        else:
            files = [f for p in os.walk(directory) for f in glob(os.path.join(p[0], file_name_pattern))]
        return files

    @staticmethod
    def gather_files(path_specs, file_name_pattern, recursively=False):
        """Find/get files specified by an `inputs` parameter.

        :param list path_specs: A list of file names / directories.
        :param str file_name_pattern: A file name pattern to search. Only used
                                      for entries in path_specs that are
                                      directories.
        :param bool recursively: If True, search in subdirectories. Only used
                                 for entries in path_specs that are directories.
        :return: List of file names satisfying *file_name_pattern* pattern.
        """
        files = []
        for path_spec in path_specs:
            if os.path.isdir(path_spec):
                files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively))
            elif os.path.isfile(path_spec):
                files.append(path_spec)
        return files

    @staticmethod
    def get_non_existing_file(file_name, max_attempts=1000):
        """Return a file name that does not exist.

        :param str file_name: A candidate file name.
        :rtype: str
        :return: The 'file_name' if this file does not exist, else the first
                 candidate of the form '<file_name>.<attempt>' that does not
                 exist.
        """
        if not os.path.exists(file_name):
            return file_name
        attempt = 0
        while True:
            candidate_file_name = "%s.%d" % (file_name, attempt)
            if not os.path.exists(candidate_file_name):
                return candidate_file_name
            attempt += 1
            if attempt >= max_attempts:
                msg = "Cannot find non existing file from pattern %s"
                raise ValueError(msg % file_name)
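    # Illustrative addition (not part of the original module): `gather_files`
    # accepts a mixed list of files and directories, which is how an `inputs`
    # parameter is typically given. The paths below are hypothetical.
    @staticmethod
    def _example_gather_files():
        specs = ['./single_run.log', './benchmarks']
        logs = IOUtils.gather_files(specs, '*.log', recursively=True)
        print("Found %d log files" % len(logs))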
\"\"\" if not recursively: files = [f for f in", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "with value \"tensorflow\". match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"] }, policy='strict') Match", "'type' not in param_info[name] or 'desc' not in param_info[name]: logging.warn( \"Parameter definition does", "= { 'type': field_type, 'index': index, 'count': count } @staticmethod def monitor_function(launcher, pid_file,", "type(source[key]).__name__ if not isinstance(source[key], schema['types']): raise ConfigurationError( \"Configuration update error - unexpected type", "if isinstance(dictionary[key], list): dictionary[key] = separator.join(str(elem) for elem in dictionary[key]) @staticmethod def filter_by_key_prefix(dictionary,", "fname is None: return None if check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) with OpenFile(fname, 'r')", "with value *default_value*. The dictionary is modified in-place. :param dict dictionary: Dictionary to", "os.path.isfile(config_file): raise ValueError(\"Configuration load error. Configuration data cannot be loaded for not a", "description dictionary that contains such fileds as value, help message, type, constraints etc.", "\"\"\"Two classes are define here :py:class:`dlbs.IOUtils` and :py:class:`dlbs.DictUtils`. \"\"\" import os import copy", "in values: return False if matches is not None: matches['%s_0' % (field)] =", "every key in query must exist in dictionary with the same value to", "by a dictionary must contain 'val' field that\" \" defines its default value.", "# What's in output: # proc_pid date virt res shrd cpu mem power", "return dict((k, dictionary[k]) for k in keys if k in dictionary) @staticmethod def", "extensions. \"\"\" if fname is None: return assert isinstance(extensions, tuple), \"The 'extensions' must", "int, float, long]') dest[key] = copy.deepcopy(source[key]) if both_lists else source[key] class ResourceMonitor(object): \"\"\"The", "schema = {'types':(dict, list), 'dict':['parameters', 'variables'], 'list':['extensions']} else: schema = {'types':(list, basestring, int,", "of a **config** the function removes parameter info leaving only their values :param", "dictionary: Dictionary to match. :param dict query: Query to use. :param ['relaxed', 'strict']", "= match.group(2).strip() value = json.loads(value) if len(value) > 0 else None if add_only_keys", "not None: matches['%s_0' % (field)] = dictionary[field] continue else: match = re.compile(value).match(dictionary[field]) if", "remove_prefix: If True, remove prefix in returned dictionary. :return: New dictionary with items", "or '.json.gz'. :rtype: None or JSON object :return: None of fname is None", "is 'strict', every key in query must exist in dictionary with the same", "possibly, recursively. Find files which names satisfy *file_name_pattern* pattern in folder *directory*. If", "write_pid_file(self, pid): \"\"\"Write the pid into pid file. :param int pid: A pid", "Must be one of %s\" % extensions) @staticmethod def read_json(fname, check_extension=False): \"\"\"Reads JSON", "list) both_primitive = type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring, int, float, long)) if", "None: matches['%s_0' % (field)] = dictionary[field] else: if value == '': # Take", "read_json(fname, check_extension=False): \"\"\"Reads JSON object from file 'fname'. 
:param str fname: File name.", "param_info[name]['val'] = val # Do final validations if 'type' in param_info[name] and param_info[name]['type']", "self.fields: tp = self.fields[field]['type'] idx = self.fields[field]['index'] count = self.fields[field]['count'] if count ==", "def stop(self): \"\"\"Closes queue and waits for resource monitor to finish.\"\"\" with open(self.pid_file,", "= False return have_module class Modules(object): \"\"\"A class that enumerates non-standard python modules", "of keys to be extracted. :param bool remove_prefix: If True, remove prefix in", "count = self.fields[field]['count'] if count == -1: metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp)) elif count == 0:", "recursively: files = [f for f in glob(os.path.join(directory, file_name_pattern))] else: files = [f", "'%s'. Must match is set to true\" % (line, pattern)) else: continue key", "= clean_config['parameters'] for name in params: val = params[name] if isinstance(val, dict): #", "-1: metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp)) elif count == 0: metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)]) else: metrics[field].append([ ResourceMonitor.str_to_type(data[index], tp)", "of the file for which we want to make sure\\ its parent directory", "satisfying *file_name_pattern* pattern. \"\"\" files = [] for path_spec in path_specs: if os.path.isdir(path_spec):", "with *prefix*. \"\"\" return_dictionary = {} for key in dictionary: if key.startswith(prefix): return_key", "is defined by a dictionary must contain 'val' field that\" \" defines its", "\"Parameter info update error.\" \" Parameter that is defined by a dictionary must", "resource manager that monitors system resource consumption. proc_pid date virt res shrd cpu", "system configuration error that must not happen.\" \" Parameter %s=%s, new parameter definition", "must not happen.\" \" Parameter %s=%s, new parameter definition (value) is %s\" %", "class ConfigurationLoader(object): \"\"\"Loads experimenter configuration from multiple files.\"\"\" @staticmethod def load(path, files=None): \"\"\"Loads", "user-provided configuration, we disallow parameter redefinition. if name in param_info: raise ConfigurationError( \"Parameter", "scans subfolders as well. :param str directory: A directory to search files in.", "file_name: Name of a file to serialie dictionary in. \"\"\" if file_name is", "not os.path.exists(candidate_file_name): return candidate_file_name attempt += 1 if attempt >= max_attempts: msg =", "against *dictionary*. The *query* and *dictionary* are actually dictionaries. If policy is 'strict',", "proc_pid date virt res shrd cpu mem power gpus_power \"\"\" def __init__(self, launcher,", "are directories. :param bool recursively: If True, search in subdirectories. Only used for", "thread is reading its output and will put the data into a queue.", "single file. ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section)) except ValueError: logging.error(\"Configuration load error. Invalid JSON configuration in", "method is to populate a dictionary with key-values from log files. :param dict", "`config` redefines existing parameters in param_info (already loaded params), program terminates. 2. Load", "in params: val = params[name] if isinstance(val, dict): # This should not generally", "where matches will be stored if match has been identified. 
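# Illustrative sketch (not part of the original module): a write_json/read_json
# round-trip through the transparent '.json.gz' path provided by OpenFile. The
# temporary directory keeps the sketch self-contained; note the parent folder
# is created explicitly via mkdirf.
def _example_json_roundtrip():
    import tempfile
    folder = tempfile.mkdtemp()
    fname = os.path.join(folder, 'results', 'summary.json.gz')
    IOUtils.mkdirf(fname)
    IOUtils.write_json(fname, {'exp.status': 'ok'}, check_extension=True)
    print(IOUtils.read_json(fname, check_extension=True))  # {u'exp.status': u'ok'}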
class DictUtils(object):
    """Container for dictionary helpers."""

    @staticmethod
    def subdict(dictionary, keys):
        """Return a sub-dictionary containing only keys from 'keys'.

        :param dict dictionary: Input dictionary.
        :param list_or_val keys: Keys to extract.
        :rtype: dict
        :return: Dictionary that contains key/value pairs for every key in
                 'keys' that is present in 'dictionary'.
        """
        keys = keys if isinstance(keys, list) else [keys]
        return dict((k, dictionary[k]) for k in keys if k in dictionary)

    @staticmethod
    def contains(dictionary, keys):
        """Checks if dictionary contains all keys in 'keys'.

        :param dict dictionary: Input dictionary.
        :param list_or_val keys: Keys to find in dictionary.
        :rtype: boolean
        :return: True if all keys are in dictionary or keys is None.
        """
        if keys is None:
            return True
        keys = keys if isinstance(keys, list) else [keys]
        for key in keys:
            if key not in dictionary:
                return False
        return True

    @staticmethod
    def ensure_exists(dictionary, key, default_value=None):
        """Ensures that the dictionary *dictionary* contains key *key*.

        If the key does not exist, it adds a new item with value
        *default_value*. The dictionary is modified in-place.

        :param dict dictionary: Dictionary to check.
        :param str key: A key that must exist.
        :param obj default_value: Default value for key if it does not exist.
        """
        if key not in dictionary:
            dictionary[key] = copy.deepcopy(default_value)

    @staticmethod
    def lists_to_strings(dictionary, separator=' '):
        """Converts every value in the dictionary that is a list to a string.

        For every key in *dictionary*, if the type of its value is 'list',
        converts this list into a string using *separator*.

        :param dict dictionary: Dictionary to modify.
        :param str separator: An item separator.
        """
        for key in dictionary:
            if isinstance(dictionary[key], list):
                dictionary[key] = separator.join(str(elem) for elem in dictionary[key])

    @staticmethod
    def filter_by_key_prefix(dictionary, prefix, remove_prefix=True):
        """Creates a new dictionary with items whose keys start with *prefix*.

        Creates a new dictionary with items from *dictionary* whose key names
        start with *prefix*. If *remove_prefix* is True, keys in the new
        dictionary will not contain this prefix. The input dictionary
        *dictionary* is not modified.

        :param dict dictionary: Dictionary to search keys in.
        :param str prefix: Prefix of keys to be extracted.
        :param bool remove_prefix: If True, remove prefix in returned dictionary.
        :return: New dictionary with items whose keys start with *prefix*.
        """
        return_dictionary = {}
        for key in dictionary:
            if key.startswith(prefix):
                return_key = key[len(prefix):] if remove_prefix else key
                return_dictionary[return_key] = copy.deepcopy(dictionary[key])
        return return_dictionary

    @staticmethod
    def dump_json_to_file(dictionary, file_name):
        """Dumps *dictionary* as a JSON object to a file with *file_name* name.

        :param dict dictionary: Dictionary to serialize.
        :param str file_name: Name of a file to serialize dictionary in.
        """
        if file_name is not None:
            IOUtils.mkdirf(file_name)
            with open(file_name, 'w') as file_obj:
                json.dump(dictionary, file_obj, indent=4)
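    # Illustrative addition (not part of the original module): extracting all
    # 'exp.'-prefixed parameters from a flat benchmark record; the keys are
    # hypothetical.
    @staticmethod
    def _example_filter_by_key_prefix():
        record = {'exp.framework': 'tensorflow', 'exp.batch': 16, 'monitor.frequency': 0.1}
        print(DictUtils.filter_by_key_prefix(record, 'exp.', remove_prefix=True))
        # {'framework': 'tensorflow', 'batch': 16}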
    @staticmethod
    def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False):
        """Updates *dictionary* with items from *iterable* object.

        This method modifies/updates *dictionary* with items from the *iterable*
        object. This object must support ``for something in iterable`` (list,
        opened file etc). Only those items in *iterable* are considered that
        match *pattern* (a regexp expression). If a particular item does not
        match, and *must_match* is True, a *ConfigurationError* exception is
        thrown.

        The regexp pattern must return two groups (1 and 2). The first group is
        considered a key, and the second group is considered a value. Values
        must be json-parseable strings.

        If *add_only_keys* is not None, only those items are added whose keys
        are in *add_only_keys*. Existing items in *dictionary* are overwritten
        with new ones if a key already exists. One use case for this method is
        to populate a dictionary with key-values from log files.

        :param dict dictionary: Dictionary to update in-place.
        :param obj iterable: Iterable object (list, opened file name etc).
        :param str pattern: A regexp pattern for matching items in ``iterable``.
        :param bool must_match: Specifies if every element in *iterable* must
                                match *pattern*. If True and not match, raises
                                exception.
        :param list add_only_keys: If not None, specifies keys that are added
                                   into *dictionary*. Others are ignored.
        :param boolean ignore_errors: If true, ignore errors.

        :raises ConfigurationError: If *must_match* is True and an item does not
                                    match, or if a value is not a json-parseable
                                    string.
        """
        matcher = re.compile(pattern)
        for line in iterable:
            match = matcher.match(line)
            if not match:
                if must_match:
                    raise ConfigurationError(
                        "Cannot match key-value from '%s' with pattern '%s'."
                        " Must match is set to true" % (line, pattern)
                    )
                else:
                    continue
            key = match.group(1).strip()
            try:
                value = match.group(2).strip()
                value = json.loads(value) if len(value) > 0 else None
                if add_only_keys is None or key in add_only_keys:
                    dictionary[key] = value
                    logging.debug("Key-value item (%s=%s) has been parsed and added to dictionary", key, value)
            except ValueError as err:
                if not ignore_errors:
                    raise ConfigurationError(
                        "Cannot parse JSON string '%s' with key '%s'"
                        " (key-value definition: '%s'). Error is %s" % (value, key, line, str(err))
                    )
    @staticmethod
    def match(dictionary, query, policy='relaxed', matches=None):
        """Match *query* against *dictionary*.

        The *query* and *dictionary* are actually dictionaries. If policy is
        'strict', every key in query must exist in dictionary with the same
        value to match. If policy is 'relaxed', dictionary may not contain all
        keys from query to be matched. In this case, the intersection of keys
        in dictionary and query is used for matching.

        It's assumed we match primitive types such as numbers and strings, not
        lists or dictionaries. If values in query are lists, then an OR
        condition applies. For instance:

        match(dictionary, query = { "framework": "tensorflow" }, policy='strict')
           Match dictionary only if it contains key 'framework' with value "tensorflow".
        match(dictionary, query = { "framework": ["tensorflow", "caffe2"] }, policy='strict')
           Match dictionary only if it contains key 'framework' with value "tensorflow" OR "caffe2".
        match(dictionary, query = { "framework": ["tensorflow", "caffe2"], "batch": [16, 32] }, policy='strict')
           Match dictionary only if it (a) contains key 'framework' with value "tensorflow" OR "caffe2"
           and (b) it contains key 'batch' with value 16 OR 32.
        match(dictionary, query = { "framework": "tensorflow" }, policy='relaxed')
           Match dictionary if it does not contain key 'framework' OR contains
           key 'framework' with value "tensorflow".

        :param dict dictionary: Dictionary to match.
        :param dict query: Query to use.
        :param ['relaxed', 'strict'] policy: Policy to match.
        :param dict matches: Dictionary where matches will be stored if a match
                             has been identified.
        :return: True if match or query is None.
        :rtype: bool
        """
        if query is None:
            return True
        assert policy in ['relaxed', 'strict'], "Unknown match policy '%s'" % policy
        for field, value in query.iteritems():
            if field not in dictionary:
                if policy == 'relaxed':
                    continue
                else:
                    return False
            if isinstance(value, list) or not isinstance(value, basestring):
                values = value if isinstance(value, list) else [value]
                if dictionary[field] not in values:
                    return False
                if matches is not None:
                    matches['%s_0' % (field)] = dictionary[field]
                continue
            else:
                if value == '':
                    # Take special care if value is an empty string
                    if value != dictionary[field]:
                        return False
                    elif matches is not None:
                        matches['%s_0' % (field)] = dictionary[field]
                    continue
                else:
                    match = re.compile(value).match(dictionary[field])
                    if not match:
                        return False
                    else:
                        if matches is not None:
                            matches['%s_0' % (field)] = dictionary[field]
                            for index, group in enumerate(match.groups()):
                                matches['%s_%d' % (field, index+1)] = group
                        continue
        return True
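# Illustrative sketch (not part of the original module): the two match policies
# on the same record. Under 'relaxed' a missing query key does not fail the
# match; a list value acts as an OR over its elements.
def _example_dict_match():
    record = {'framework': 'tensorflow', 'batch': 16}
    print(DictUtils.match(record, {'framework': ['tensorflow', 'caffe2']}))   # True
    print(DictUtils.match(record, {'phase': 'training'}, policy='strict'))    # False
    print(DictUtils.match(record, {'phase': 'training'}, policy='relaxed'))   # True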
class ConfigurationLoader(object):
    """Loads experimenter configuration from multiple files."""

    @staticmethod
    def load(path, files=None):
        """Loads configurations (normally from the `configs` folder).

        :param str path: Path to load configurations from.
        :param list files: List of file names to load. If None, all files with
                           a JSON extension in **path** are loaded.
        :return: A tuple consisting of a list of config files, a configuration
                 object (dictionary) and a parameter info dictionary.

        This method loads configuration files located in 'path'. If `files` is
        empty, all json files are loaded from that folder. This method fails if
        one parameter is defined in multiple files. This is intended behaviour
        for now (this also applies to the update_param_info method).
        """
        if path is None:
            raise ValueError("Configuration load error. The 'path' parameter cannot be None.")
        if not os.path.isdir(path):
            raise ValueError("Configuration load error. The 'path' parameter (%s) must point to an existing directory." % path)

        if files is not None:
            config_files = [os.path.join(path, f) for f in files]
        else:
            config_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.json')]
        config = {}      # Configuration with params/vars/extensions
        param_info = {}  # Information on params such as type and help messages
        for config_file in config_files:
            if not os.path.isfile(config_file):
                raise ValueError("Configuration load error. Configuration data cannot be loaded for not a file (%s)" % config_file)
            with open(config_file) as file_obj:
                try:
                    # A part of global configuration from this particular file
                    config_section = json.load(file_obj)
                    # Update parameters info.
                    ConfigurationLoader.update_param_info(param_info, config_section, is_user_config=False)
                    # Join configuration from this single file.
                    ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section))
                except ValueError:
                    logging.error("Configuration load error. Invalid JSON configuration in file %s", config_file)
                    raise
        return (config_files, config, param_info)
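    # Illustrative addition (not part of the original module): loading the
    # standard configuration folder. The path is hypothetical; the tuple
    # layout is the one documented above.
    @staticmethod
    def _example_load():
        config_files, config, param_info = ConfigurationLoader.load('./configs')
        print("Loaded %d files, %d parameters" % (len(config_files), len(param_info)))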
    @staticmethod
    def update_param_info(param_info, config, is_user_config=False):
        """Updates the parameter info dictionary based on the configuration in **config**.

        :param dict param_info: A parameter info dictionary that maps a
                                parameter name to its description dictionary
                                that contains such fields as value, help
                                message, type, constraints etc.
        :param dict config: A dictionary with a configuration section that may
                            contain parameters, variables and extensions. The
                            **config** is a result of parsing a JSON
                            configuration file.
        :param bool is_user_config: If True, the config object represents
                                    user-provided configuration. If False, this
                                    is a system configuration.

        We are interested here only in the parameters section, where parameter
        information is defined. Based on the is_user_config flag, we deal with
        parameters in config that redefine parameters in param_info
        differently. The following logic applies:

        1. Load standard configuration. In this case, parameter redefinition is
           prohibited. If the `parameters` section in `config` redefines
           existing parameters in param_info (already loaded params), the
           program terminates.
        2. Load user-provided configuration. In this case, we still update the
           parameter info structure, but deal with it in a slightly different
           way. If a parameter in `config` exists in param_info, it means the
           user has provided their specific value for this parameter.

        Types of user-defined parameters are defined either by the user in a
        standard way, as we define types for standard parameters, or induced
        automatically based on the value.
        """
        if 'parameters' not in config:
            return
        params = config['parameters']
        for name in params:
            val = params[name]
            if not is_user_config:
                # If this is not a user-provided configuration, we disallow parameter redefinition.
                if name in param_info:
                    raise ConfigurationError(
                        "Parameter info update error."
                        " Parameter redefinition is not allowed for non-user configuration."
                        " This is a system configuration error that must not happen."
                        " Parameter %s=%s, new parameter definition (value) is %s" % (name, str(param_info[name]), val)
                    )
            if isinstance(val, dict):
                # This is a complete parameter definition with name, value and description.
                if 'val' not in val:
                    raise ConfigurationError(
                        "Parameter info update error."
                        " Parameter that is defined by a dictionary must contain 'val' field that"
                        " defines its default value. Found this definition: %s=%s" % (name, val)
                    )
                if name not in param_info:
                    param_info[name] = copy.deepcopy(val)  # New parameter, set its info object.
                    # TODO what about parameter type and description?
                else:
                    logging.warn(
                        " Parameter (%s) entirely redefines existing parameter (%s)."
                        " Normally, only value needs to be provided."
                        " We will proceed but you may want to fix this.",
                        json.dumps(val), json.dumps(param_info[name])
                    )
                    param_info[name]['val'] = val['val']  # Existing parameter from user configuration, update its value
            else:
                # Just parameter value
                val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__
                if name not in param_info:
                    param_info[name] = {
                        'val': val, 'type': val_type,
                        'desc': "No description for this parameter provided (it was automatically converted from its value)."
                    }
                else:
                    param_info[name]['val'] = val
            # Do final validations
            if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'):
                raise ConfigurationError(
                    "Parameter info update error."
                    " Parameter has invalid type = '%s'."
                    " Parameter definition is %s = %s" % (param_info[name]['type'], name, param_info[name])
                )
            if 'type' not in param_info[name] or 'desc' not in param_info[name]:
                logging.warn(
                    "Parameter definition does not contain type ('type') and/or description ('desc')."
                    " You should fix this. Parameter definition is"
                    " %s = %s", name, param_info[name]
                )
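    # Illustrative addition (not part of the original module): the two accepted
    # parameter forms. A bare value is wrapped automatically with an induced
    # type; a dictionary form must carry 'val'. Parameter names are
    # hypothetical.
    @staticmethod
    def _example_update_param_info():
        param_info = {}
        config = {'parameters': {
            'exp.warmup_iters': 1,                                    # bare value
            'exp.framework': {'val': 'tensorflow', 'type': 'str',
                              'desc': 'A framework to benchmark.'}    # full definition
        }}
        ConfigurationLoader.update_param_info(param_info, config, is_user_config=True)
        print(param_info['exp.warmup_iters']['type'])  # 'int'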
    @staticmethod
    def remove_info(config):
        """In the parameter section of **config**, removes parameter info leaving only values.

        :param dict config: A dictionary with a configuration section that may
                            contain parameters, variables and extensions. The
                            **config** is a result of parsing a JSON
                            configuration file.
        :return: A copy of **config** with info removed.
        """
        clean_config = copy.deepcopy(config)
        if 'parameters' in clean_config:
            params = clean_config['parameters']
            for name in params:
                val = params[name]
                if isinstance(val, dict):
                    # This should not generally happen since we deal with it in update_param_info, but just in case
                    if 'val' not in val:
                        raise ConfigurationError(
                            "Parameter info remove error."
                            " Parameter that is defined by a dictionary must contain 'val' field that"
                            " defines its default value. Found this definition: %s=%s" % (name, val)
                        )
                    params[name] = val['val']
        return clean_config
    @staticmethod
    def update(dest, source, is_root=True):
        """Merge the **source** dictionary into the **dest** dictionary assuming
        source and dest are JSON configuration configs or their members.

        :param dict dest: Merge data to this dictionary.
        :param dict source: Merge data from this dictionary.
        :param bool is_root: True if **dest** and **source** are root
                             configuration objects. False if these objects are
                             members.
        """
        def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type, valid_types):
            raise ConfigurationError(
                "Configuration update error - expecting value types to be same and one of %s but"
                " Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__)
            )
        # Types and expected key names. Types must always match, else an exception is thrown.
        if is_root:
            schema = {'types': (dict, list), 'dict': ['parameters', 'variables'], 'list': ['extensions']}
        else:
            schema = {'types': (list, basestring, int, float, long)}
        for key in source:
            # Firstly, check that type of value is expected.
            val_type = type(source[key]).__name__
            if not isinstance(source[key], schema['types']):
                raise ConfigurationError(
                    "Configuration update error - unexpected type of key value: "
                    " is_root=%s, key=%s, value type=%s, expected type is one of %s" % \
                    (str(is_root), key, val_type, str(schema['types']))
                )
            # So, the key value is of a valid type. An unexpected root key name is suspicious - we can check it only for root.
            if is_root and key not in schema[val_type]:
                logging.warn("The name of a root key is '%s' but expected is one of '%s'", key, schema[val_type])
            if key not in dest:
                # The key in source does not exist in dest: just deep copy it.
                dest[key] = copy.deepcopy(source[key])
            else:
                # The key from source is in dest.
                both_dicts = isinstance(dest[key], dict) and isinstance(source[key], dict)
                both_lists = isinstance(dest[key], list) and isinstance(source[key], list)
                both_primitive = type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring, int, float, long))

                if is_root:
                    if not both_dicts and not both_lists:
                        _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict, list]')
                    if both_dicts:
                        ConfigurationLoader.update(dest[key], source[key], is_root=False)
                    else:
                        dest[key].extend(source[key])
                else:
                    if not both_lists and not both_primitive:
                        _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list, basestring, int, float, long]')
                    dest[key] = copy.deepcopy(source[key]) if both_lists else source[key]
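# Illustrative sketch (not part of the original module): how two config files
# are joined - root 'parameters' dictionaries merge recursively while root
# 'extensions' lists concatenate.
def _example_config_update():
    dest = {'parameters': {'exp.framework': 'tensorflow'}, 'extensions': [{'a': '1'}]}
    source = {'parameters': {'exp.phase': 'training'}, 'extensions': [{'b': '2'}]}
    ConfigurationLoader.update(dest, source)
    print(sorted(dest['parameters'].keys()))  # ['exp.framework', 'exp.phase']
    print(len(dest['extensions']))            # 2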
For instance: match(dictionary, query = {", "candidate_file_name = \"%s.%d\" % (file_name, attempt) if not os.path.exists(candidate_file_name): return candidate_file_name attempt +=", "likely should not be used. \"\"\" with open(self.pid_file, 'w') as fhandle: fhandle.write('%d' %", ":param str separator: An item separator. \"\"\" for key in dictionary: if isinstance(dictionary[key],", "names to load. If None, all files with JSON extension in **path** are", "item does not match, and *must_match* is True, *ConfigurationError* exception is thrown. Regexp", "add_only_keys is None or key in add_only_keys: dictionary[key] = value logging.debug(\"Key-value item (%s=%s)", "\"\"\" Converts every value in dictionary that is list to strings. For every", "params), program terminates. 2. Load user-provided configuration. In this case, we still update", "json.dump(data, fobj, indent=4) class DictUtils(object): \"\"\"Container for dictionary helpers.\"\"\" @staticmethod def subdict(dictionary, keys):", "fname.endswith(extensions): raise ValueError(\"Invalid file extension (%s). Must be one of %s\" % extensions)", "match *pattern* (it's a regexp epression). If a particular item does not match,", "two groups (1 and 2). First group is considered as a key, and", "\" Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)\" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__) )", "if type of a value is 'list', converts this list into a string", "else: dest[key].extend(source[key]) else: if not both_lists and not both_primitive: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list,", "'int', 'float', 'bool'),\\ \"Invalid field type (%s). Must be one of ('str', 'int',", "not create queue and process. :param str launcher: A full path to resource", "dictionary: if key.startswith(prefix): return_key = key[len(prefix):] if remove_prefix else key return_dictionary[return_key] = copy.deepcopy(dictionary[key])", "benchmarks in containers, we must not delete file here, but create or empty", "and description. if 'val' not in val: raise ConfigurationError( \"Parameter info update error.\"", "check that type of value is expected. val_type = type(source[key]).__name__ if not isinstance(source[key],", "None of fname is None else JSON loaded from the file. \"\"\" if", "is used for matching. It's assuemd we match primitive types such as numbers", "output = process.stdout.readline() if output == '' and process.poll() is not None: break", "return float(str_val) elif val_type == 'bool': v = str_val.lower() assert v in ('true',", "name not in param_info: param_info[name] = copy.deepcopy(val) # New parameter, set it info", "@staticmethod def match(dictionary, query, policy='relaxed', matches=None): \"\"\" Match *query* against *dictionary*. The *query*", "dictionary *dictionary* contains key *key* If key does not exist, it adds a", "with it in slightly different way. If parameter in `config` exists in param_info,", "is one of %s\" % \\ (str(is_root), key, val_type, str(schema['types'])) ) # So,", "queue and waits for resource monitor to finish.\"\"\" with open(self.pid_file, 'w') as fhandle:", "'desc': \"No description for this parameter provided (it was automatically converted from its", "'path'. 
If `files` is empty, all json files are loaded from that folder.", "value is an empty string if value != dictionary[field]: return False elif matches", "fname is None: return assert isinstance(extensions, tuple), \"The 'extensions' must be a tuple.\"", "dict dictionary: Dictionary to search keys in. :param str prefix: Prefix of keys", "key value is suspicious - we can do it only for root. if", "def read_json(fname, check_extension=False): \"\"\"Reads JSON object from file 'fname'. :param str fname: File", "float, long)} for key in source: # Firstly, check that type of value", "@staticmethod def monitor_function(launcher, pid_file, frequency, queue): \"\"\"A main monitor worker function. :param str", "dictionary: Dictionary to modify. :param str separator: An item separator. \"\"\" for key", "if len(value) > 0 else None if add_only_keys is None or key in", "regexp epression). If a particular item does not match, and *must_match* is True,", "loaded params), program terminates. 2. Load user-provided configuration. In this case, we still", "in seconds. Can be something like 0.1 seconds \"\"\" self.launcher = launcher self.pid_file", "regexp pattern for matching items in ``iterable``. :param bool must_match: Specifies if every", "file_obj: try: # A part of global configuration from this particular file config_section", "root. if is_root and key not in schema[val_type]: logging.warn(\"The name of a root", "If True and not match, raises exception. :param list add_only_keys: If not None,", "err: if not ignore_errors: raise ConfigurationError(\"Cannot parse JSON string '%s' with key '%s'", "a dictionary must contain 'val' field that\" \" defines its default value. Found", "we can disable certain functionality if something is missing. \"\"\" HAVE_NUMPY = _ModuleImporter.try_import('numpy')", "Copyright [2017] Hewlett Packard Enterprise Development LP # # Licensed under the Apache", "self.queue.empty(): data = self.queue.get().strip().split() for field in self.fields: tp = self.fields[field]['type'] idx =", "copy.deepcopy(default_value) @staticmethod def lists_to_strings(dictionary, separator=' '): \"\"\" Converts every value in dictionary that", "lists and return them. time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8- :return: Dictionary that maps metric field to a", "name:type:index, name:type:index: or name:type:index:count\" % raw_field field_name = fields_split[0] assert field_name not in", "None: return True keys = keys if isinstance(keys, list) else [keys] for key", "with open(config_file) as file_obj: try: # A part of global configuration from this", "tuple), \"The 'extensions' must be a tuple.\" if not fname.endswith(extensions): raise ValueError(\"Invalid file", "\"\"\" Match *query* against *dictionary*. The *query* and *dictionary* are actually dictionaries. If", "in path_specs that are directories. :param bool recursively: If True, search in subdirectories.", "get_measurements(self): \"\"\"Dequeue all data, put it into lists and return them. time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8- :return:", "[16, 32] }, policy='strict') Match dictionary only if it (a) contains key 'framework'", "resource monitor but does not create queue and process. :param str launcher: A", "in path_specs that are directories. 
class IOUtils(object):
    """Container for input/output helpers"""

    @staticmethod
    def mkdirf(file_name):
        """Makes sure that parent folder of this file exists.

        The file itself may not exist. A typical usage is to ensure that we can
        write to this file. If path to parent folder does not exist, it will be
        created. See documentation for :py:func:`os.makedirs` for more details.

        :param str file_name: A name of the file for which we want to make sure
                              its parent directory exists.
        """
        dir_name = os.path.dirname(file_name)
        if dir_name != '' and not os.path.isdir(dir_name):
            os.makedirs(dir_name)

    @staticmethod
    def check_file_extensions(fname, extensions):
        """Checks that fname has one of the provided extensions.

        :param str fname: The file name to check.
        :param tuple extensions: A tuple of extensions to use.

        Raises exception if fname does not end with one of the extensions.
        """
        if fname is None:
            return
        assert isinstance(extensions, tuple), "The 'extensions' must be a tuple."
        if not fname.endswith(extensions):
            raise ValueError("Invalid file extension (%s). Must be one of %s" % (fname, str(extensions)))

    @staticmethod
    def read_json(fname, check_extension=False):
        """Reads JSON object from file 'fname'.

        :param str fname: File name.
        :param boolean check_extension: If True, raises exception if fname does not
                                        end with '.json' or '.json.gz'.
        :rtype: None or JSON object
        :return: None if fname is None else JSON loaded from the file.
        """
        if fname is None:
            return None
        if check_extension:
            IOUtils.check_file_extensions(fname, ('.json', '.json.gz'))
        with OpenFile(fname, 'r') as fobj:
            return json.load(fobj)

    @staticmethod
    def write_json(fname, data, check_extension=False):
        """Dumps *data* as a JSON object to a file with *fname* name.

        :param str fname: File name.
        :param any data: A data to dump into a JSON file.
        :param boolean check_extension: If True, raises exception if fname does not
                                        end with '.json' or '.json.gz'.
        """
        if fname is None:
            raise ValueError("File name is None")
        if check_extension:
            IOUtils.check_file_extensions(fname, ('.json', '.json.gz'))
        IOUtils.mkdirf(fname)
        with OpenFile(fname, 'w') as fobj:
            json.dump(data, fobj, indent=4)

    @staticmethod
    def find_files(directory, file_name_pattern, recursively=False):
        """Find files which names satisfy *file_name_pattern* pattern in folder
        *directory*. If *recursively* is True, scans subfolders as well.

        :param str directory: A directory to search files in.
        :param str file_name_pattern: A file name pattern to search. For instance,
                                      it can be '*.log'.
        :param bool recursively: If True, search in subdirectories.
        :return: List of file names satisfying *file_name_pattern* pattern.
        """
        if not recursively:
            files = [f for f in glob(os.path.join(directory, file_name_pattern))]
        else:
            files = [f for p in os.walk(directory) for f in glob(os.path.join(p[0], file_name_pattern))]
        return files

    @staticmethod
    def gather_files(path_specs, file_name_pattern, recursively=False):
        """Find/get files specified by an `inputs` parameter.

        :param list path_specs: A list of file names and/or directories.
        :param str file_name_pattern: A file name pattern to search. Only used for
                                      entries in path_specs that are directories.
        :param bool recursively: If True, search in subdirectories. Only used for
                                 entries in path_specs that are directories.
        :return: List of file names satisfying *file_name_pattern* pattern.
        """
        files = []
        for path_spec in path_specs:
            if os.path.isdir(path_spec):
                files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively))
            elif os.path.isfile(path_spec):
                files.append(path_spec)
        return files

    @staticmethod
    def get_non_existing_file(file_name, max_attempts=1000):
        """Return file name that does not exist.

        :param str file_name: Input file name.
        :rtype: str
        :return: The 'file_name' if this file does not exist, else the first
                 candidate name 'file_name.N' that does not exist.
        """
        if not os.path.exists(file_name):
            return file_name
        attempt = 0
        while True:
            candidate_file_name = "%s.%d" % (file_name, attempt)
            if not os.path.exists(candidate_file_name):
                return candidate_file_name
            attempt += 1
            if attempt >= max_attempts:
                msg = "Cannot find non existing file name for '%s' in %d attempts." % (file_name, max_attempts)
                raise ValueError(msg)
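# A minimal usage sketch for the IOUtils helpers: gather '*.log' files from a
# hypothetical benchmark output directory and pick a fresh summary file name.
def _ioutils_example():
    log_files = IOUtils.gather_files(['/tmp/dlbs_logs'], '*.log', recursively=True)
    summary_file = IOUtils.get_non_existing_file('/tmp/dlbs_logs/summary.json')
    IOUtils.write_json(summary_file, {'num_logs': len(log_files)})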
class DictUtils(object):
    """Container for dictionary helpers."""

    @staticmethod
    def subdict(dictionary, keys):
        """Return subdictionary containing only keys from 'keys'.

        :param dict dictionary: Input dictionary.
        :param list_or_val keys: Keys to extract.
        :rtype: dict
        :return: Dictionary that contains key/value pairs for key in keys.
        """
        if keys is None:
            return dictionary
        return dict((k, dictionary[k]) for k in keys if k in dictionary)

    @staticmethod
    def contains(dictionary, keys):
        """Checks if dictionary contains all keys in 'keys'.

        :param dict dictionary: Input dictionary.
        :param list_or_val keys: Keys to find in dictionary.
        :rtype: bool
        :return: True if all keys are in dictionary or keys is None.
        """
        if keys is None:
            return True
        keys = keys if isinstance(keys, list) else [keys]
        for key in keys:
            if key not in dictionary:
                return False
        return True

    @staticmethod
    def ensure_exists(dictionary, key, default_value=None):
        """Ensures that the dictionary *dictionary* contains key *key*.

        If key does not exist, it adds a new item with value *default_value*.
        The dictionary is modified in-place.

        :param dict dictionary: Dictionary to check.
        :param str key: A key that must exist.
        :param obj default_value: Default value for key if it does not exist.
        """
        if key not in dictionary:
            dictionary[key] = copy.deepcopy(default_value)

    @staticmethod
    def lists_to_strings(dictionary, separator=' '):
        """Converts every value in dictionary that is a list to a string.

        For every item in *dictionary*, if the type of a value is 'list', converts
        this list into a string using separator *separator*. The dictionary is
        modified in-place.

        :param dict dictionary: Dictionary to modify.
        :param str separator: An item separator.
        """
        for key in dictionary:
            if isinstance(dictionary[key], list):
                dictionary[key] = separator.join(str(elem) for elem in dictionary[key])

    @staticmethod
    def filter_by_key_prefix(dictionary, prefix, remove_prefix=True):
        """Creates new dictionary with items whose keys start with *prefix*.

        Creates new dictionary with items from *dictionary* whose key names start
        with *prefix*. If *remove_prefix* is True, keys in the new dictionary will
        not contain this prefix. The dictionary *dictionary* is not modified.

        :param dict dictionary: Dictionary to search keys in.
        :param str prefix: Prefix of keys to be extracted.
        :param bool remove_prefix: If True, remove prefix in returned dictionary.
        :return: New dictionary with items whose key names start with *prefix*.
        """
        return_dictionary = {}
        for key in dictionary:
            if key.startswith(prefix):
                return_key = key[len(prefix):] if remove_prefix else key
                return_dictionary[return_key] = copy.deepcopy(dictionary[key])
        return return_dictionary

    @staticmethod
    def dump_json_to_file(dictionary, file_name):
        """Dumps *dictionary* as a JSON object to a file with *file_name* name.

        :param dict dictionary: Dictionary to serialize.
        :param str file_name: Name of a file to serialize dictionary in.
        """
        if file_name is not None:
            IOUtils.mkdirf(file_name)
            with open(file_name, 'w') as file_obj:
                json.dump(dictionary, file_obj, indent=4)
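# A minimal usage sketch for the dictionary helpers above; the parameter names
# are hypothetical.
def _dictutils_example():
    params = {'exp.framework': 'tensorflow', 'exp.gpus': [0, 1], 'runtime.log': 'a.log'}
    exp = DictUtils.filter_by_key_prefix(params, 'exp.')   # {'framework': ..., 'gpus': [0, 1]}
    DictUtils.lists_to_strings(exp)                        # 'gpus' becomes "0 1"
    return DictUtils.subdict(exp, ['framework'])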
    @staticmethod
    def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False):
        """ Updates *dictionary* with items from *iterable* object.

        This method modifies/updates *dictionary* with items from *iterable* object.
        This object must support ``for something in iterable`` (list, opened file
        etc). Only those items in *iterable* are considered that match *pattern*
        (it's a regexp expression). If a particular item does not match, and
        *must_match* is True, *ConfigurationError* exception is thrown.

        Regexp pattern must return two groups (1 and 2). First group is considered
        as a key, and second group is considered to be value. Values must be
        json-parseable strings.

        If *add_only_keys* is not None, only those items are added to *dictionary*,
        that are in this list. Existing items in *dictionary* are overwritten with
        new ones if key already exists.

        One use case to use this method is to populate a dictionary with key-values
        from log files.

        :param dict dictionary: Dictionary to update in-place.
        :param obj iterable: Iterable object (list, opened file name etc).
        :param str pattern: A regexp pattern for matching items in ``iterable``.
        :param bool must_match: Specifies if every element in *iterable* must match
                                *pattern*. If True and not match, raises exception.
        :param list add_only_keys: If not None, specifies keys that are added into
                                   *dictionary*. Other keys are ignored.
        :param bool ignore_errors: If True, do not raise on JSON parse errors.

        :raises ConfigurationError: If *must_match* is True and not match or if value
                                    is not a json-parseable string.
        """
        matcher = re.compile(pattern)
        for line in iterable:
            match = matcher.match(line)
            if not match:
                if must_match:
                    raise ConfigurationError(
                        "Cannot match key-value from '%s' with pattern '%s'."
                        " Must match is set to true" % (line, pattern)
                    )
                else:
                    continue
            key = match.group(1).strip()
            try:
                value = match.group(2).strip()
                value = json.loads(value) if len(value) > 0 else None
                if add_only_keys is None or key in add_only_keys:
                    dictionary[key] = value
                    logging.debug("Key-value item (%s=%s) has been parsed and added to dictionary", key, str(value))
            except ValueError as err:
                if not ignore_errors:
                    raise ConfigurationError(
                        "Cannot parse JSON string '%s' with key '%s'"
                        " (key-value definition: '%s'). Error is %s" % (value, key, line, str(err))
                    )
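# A minimal usage sketch for DictUtils.add: populate a dictionary from
# log-style key-value lines. The '__key__=value' format below is illustrative;
# real benchmark logs may use a different convention.
def _add_example():
    results = {}
    log_lines = ['__exp.framework__="tensorflow"', '__results.time__=12.5']
    DictUtils.add(results, log_lines, pattern='^__(.+?)__=(.+)$', must_match=False)
    return results   # {'exp.framework': 'tensorflow', 'results.time': 12.5}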
    @staticmethod
    def match(dictionary, query, policy='relaxed', matches=None):
        """ Match *query* against *dictionary*.

        The *query* and *dictionary* are actually dictionaries. If policy is
        'strict', every key in query must exist in dictionary with the same value
        to match. If policy is 'relaxed', dictionary may not contain all keys from
        query to be matched. In this case, the intersection of keys in dictionary
        and query is used for matching.

        It's assumed we match primitive types such as numbers and strings, not
        lists or dictionaries. If values in query are lists, then condition OR
        applies. For instance:

        match(dictionary, query = { "framework": "tensorflow" }, policy='strict')
           Match dictionary only if it contains key 'framework' with value "tensorflow".
        match(dictionary, query = { "framework": "tensorflow" }, policy='relaxed')
           Match dictionary if it does not contain key 'framework' OR contains
           key 'framework' with value "tensorflow".
        match(dictionary, query = { "framework": ["tensorflow", "caffe2"] }, policy='strict')
           Match dictionary only if it contains key 'framework' with value
           "tensorflow" OR "caffe2".
        match(dictionary, query = { "framework": ["tensorflow", "caffe2"], "batch": [16, 32] }, policy='strict')
           Match dictionary only if it (a) contains key 'framework' with value
           "tensorflow" OR "caffe2" and (b) it contains key 'batch' with value 16 OR 32.

        :param dict dictionary: Dictionary to match.
        :param dict query: Query to use.
        :param ['relaxed', 'strict'] policy: Policy to match.
        :param dict matches: Dictionary where matches will be stored if match has
                             been identified.
        :return: True if match or query is None
        :rtype: bool
        """
        if query is None:
            return True
        assert policy in ['relaxed', 'strict'], ""
        for field, value in query.iteritems():
            if field not in dictionary:
                if policy == 'relaxed':
                    continue
                else:
                    return False
            if isinstance(value, list) or not isinstance(value, basestring):
                # A list of alternatives or a primitive non-string value.
                values = value if isinstance(value, list) else [value]
                if dictionary[field] not in values:
                    return False
                if matches is not None:
                    matches['%s_0' % (field)] = dictionary[field]
            else:
                if value == '':
                    # Take special care if value is an empty string
                    if value != dictionary[field]:
                        return False
                    elif matches is not None:
                        matches['%s_0' % (field)] = dictionary[field]
                    continue
                else:
                    # String values are treated as regular expressions.
                    match = re.compile(value).match(dictionary[field])
                    if not match:
                        return False
                    else:
                        if matches is not None:
                            matches['%s_0' % (field)] = dictionary[field]
                            for index, group in enumerate(match.groups()):
                                matches['%s_%d' % (field, index+1)] = group
                        continue
        return True
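# A minimal usage sketch for DictUtils.match: string query values act as
# regular expressions and captured groups are reported through 'matches'.
def _match_example():
    record = {'framework': 'tensorflow', 'batch': 16}
    matches = {}
    ok = DictUtils.match(record, {'framework': '(tensor)flow'}, policy='relaxed', matches=matches)
    return ok, matches   # True, {'framework_0': 'tensorflow', 'framework_1': 'tensor'}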
class ConfigurationLoader(object):
    """Loads experimenter configuration from multiple files."""

    @staticmethod
    def load(path, files=None):
        """Loads configurations (normally from the `configs` folder).

        This method loads configuration files located in 'path'. If `files` is
        empty, all json files are loaded from that folder. This method fails if
        one parameter is defined in multiple files. This is intended behaviour
        for now (this also applies for update_param_info method).

        :param str path: Path to load configurations from.
        :param list files: List of file names to load. If None, all files with
                           JSON extension in **path** are loaded.
        :return: A tuple consisting of a list of config files, configuration
                 object (dictionary) and dictionary of parameters info.
        """
        if path is None:
            raise ValueError("Configuration load error. The 'path' parameter cannot be None.")
        if not os.path.isdir(path):
            raise ValueError("Configuration load error. The 'path' parameter (%s) must point to an existing directory." % path)

        if files is not None:
            config_files = [os.path.join(path, f) for f in files]
        else:
            config_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.json')]

        config = {}      # Configuration with params/vars/extensions
        param_info = {}  # Information on params such as types and help messages
        for config_file in config_files:
            if not os.path.isfile(config_file):
                raise ValueError("Configuration load error. Configuration data cannot be loaded for not a file (%s)" % config_file)
            with open(config_file) as file_obj:
                try:
                    # A part of global configuration from this particular file
                    config_section = json.load(file_obj)
                    # Update parameters info.
                    ConfigurationLoader.update_param_info(param_info, config_section, is_user_config=False)
                    # Join configuration from this single file.
                    ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section))
                except ValueError:
                    logging.error("Configuration load error. Invalid JSON configuration in file %s", config_file)
                    raise
        return (config_files, config, param_info)
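# A minimal usage sketch for ConfigurationLoader.load; the configs path below
# is hypothetical.
def _load_example():
    config_files, config, param_info = ConfigurationLoader.load('/opt/dlbs/python/dlbs/configs')
    return len(config_files), sorted(config.keys()), len(param_info)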
    @staticmethod
    def update_param_info(param_info, config, is_user_config=False):
        """Update parameter info dictionary based on configuration in **config**.

        :param dict param_info: A parameter info dictionary that maps parameter
                                name to its description dictionary that contains
                                such fields as value, help message, type,
                                constraints etc.
        :param dict config: A dictionary with configuration section that may
                            contain parameters, variables and extensions. The
                            **config** is a result of parsing a JSON configuration
                            file.
        :param bool is_user_config: If True, the config object represents
                                    user-provided configuration. If False, this is
                                    a system configuration. Based on this flag, we
                                    deal with parameters in config that redefine
                                    parameters in existing param_info differently.
                                    See comments below.

        We are interested here only in parameters section where parameter
        information is defined. There are two scenarios this method is used:

        1. Load standard configuration. In this case, parameter redefinition is
           prohibited. If `parameters` section in `config` redefines existing
           parameters in param_info (already loaded params), program terminates.
        2. Load user-provided configuration. In this case, we still update
           parameter info structure, but deal with it in a slightly different way.
           If parameter in `config` exists in param_info, it means that a user
           provides their specific value for this parameter.

        Types of user defined parameters are defined either by user in a standard
        way as we define types for standard parameters, or induced automatically
        based on JSON parse result.
        """
        if config is None or 'parameters' not in config:
            return
        params = config['parameters']
        for name in params:
            val = params[name]
            if not is_user_config:
                # If this is not a user-provided configuration, we disallow parameter redefinition.
                if name in param_info:
                    raise ConfigurationError(
                        "Parameter info update error."
                        " Parameter redefinition is not allowed for non-user configuration."
                        " This is a system configuration error that must not happen."
                        " Parameter %s=%s, new parameter definition (value) is %s" % (name, str(param_info[name]), val)
                    )
            if isinstance(val, dict):
                # This is a complete parameter definition with name, value and description.
                if 'val' not in val:
                    raise ConfigurationError(
                        "Parameter info update error."
                        " Parameter that is defined by a dictionary must contain 'val' field that"
                        " defines its default value. Found this definition: %s=%s" % (name, val)
                    )
                if name not in param_info:
                    param_info[name] = copy.deepcopy(val)  # New parameter, set its info object.
                    # TODO what about parameter type and description?
                else:
                    logging.warn(
                        " Parameter (%s) entirely redefines existing parameter (%s)."
                        " Normally, only value needs to be defined."
                        " We will proceed but you may want to fix this.",
                        json.dumps(val),
                        json.dumps(param_info[name])
                    )
                    param_info[name]['val'] = val['val']  # Existing parameter from user configuration, update its value
            else:
                # Just parameter value
                val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__
                if name not in param_info:
                    param_info[name] = {
                        'val': val,
                        'type': val_type,
                        'desc': "No description for this parameter provided (it was automatically converted from its value)."
                    }
                else:
                    param_info[name]['val'] = val
            # Do final validations
            if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'):
                raise ConfigurationError(
                    "Parameter info update error."
                    " Parameter has invalid type = '%s'."
                    " Parameter definition is %s = %s" % (param_info[name]['type'], name, param_info[name])
                )
            if 'type' not in param_info[name] or 'desc' not in param_info[name]:
                logging.warn(
                    "Parameter definition does not contain type ('type') and/or description ('desc')."
                    " You should fix this. Parameter definition is"
                    " %s = %s", name, param_info[name]
                )

    @staticmethod
    def remove_info(config):
        """In parameter section of a **config** the function removes parameter
        info, leaving only their values.

        :param dict config: A dictionary with configuration section that may
                            contain parameters, variables and extensions. The
                            **config** is a result of parsing a JSON configuration
                            file.
        :return: A copy of **config** with info removed.
        """
        clean_config = copy.deepcopy(config)
        if 'parameters' in clean_config:
            params = clean_config['parameters']
            for name in params:
                val = params[name]
                if isinstance(val, dict):
                    # This should not generally happen since we deal with it in update_param_info, but just in case
                    if 'val' not in val:
                        raise ConfigurationError(
                            "Parameter info remove error."
                            " Parameter that is defined by a dictionary must contain 'val' field that"
                            " defines its default value. Found this definition: %s=%s" % (name, val)
                        )
                    params[name] = val['val']
        return clean_config
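# A minimal usage sketch for update_param_info: a plain value becomes a full
# parameter descriptor with an induced type. The parameter name is hypothetical.
def _param_info_example():
    param_info = {}
    ConfigurationLoader.update_param_info(param_info, {'parameters': {'exp.num_warmup_batches': 1}})
    return param_info['exp.num_warmup_batches']['type']   # 'int'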
    @staticmethod
    def update(dest, source, is_root=True):
        """Merge **source** dictionary into **dest** dictionary assuming source
        and dest are JSON configuration configs or their members.

        :param dict dest: Merge data to this dictionary.
        :param dict source: Merge data from this dictionary.
        :param bool is_root: True if **dest** and **source** are root configuration
                             objects. False if these objects are members.
        """
        def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type, valid_types):
            raise ConfigurationError(
                "Configuration update error - expecting value types to be same and one of %s but"
                " Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__)
            )
        # Types and expected key names. Types must always match, else exception is thrown.
        if is_root:
            schema = {'types': (dict, list), 'dict': ['parameters', 'variables'], 'list': ['extensions']}
        else:
            schema = {'types': (list, basestring, int, float, long)}
        for key in source:
            # Firstly, check that type of value is expected.
            val_type = type(source[key]).__name__
            if not isinstance(source[key], schema['types']):
                raise ConfigurationError(
                    "Configuration update error - unexpected type of key value: "
                    " is_root=%s, key=%s, value type=%s, expected type is one of %s" % \
                    (str(is_root), key, val_type, str(schema['types']))
                )
            # So, the type is expected. Warn if key value is suspicious - we can do it only for root.
            if is_root and key not in schema[val_type]:
                logging.warn("The name of a root key is '%s' but expected is one of '%s'", key, schema[val_type])

            if key not in dest:
                # The key in source dictionary is not in destination dictionary.
                dest[key] = copy.deepcopy(source[key])
            else:
                # The key from source is in dest.
                both_dicts = isinstance(dest[key], dict) and isinstance(source[key], dict)
                both_lists = isinstance(dest[key], list) and isinstance(source[key], list)
                both_primitive = type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring, int, float, long))

                if is_root:
                    if not both_dicts and not both_lists:
                        _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict, list]')
                    if both_dicts:
                        ConfigurationLoader.update(dest[key], source[key], is_root=False)
                    else:
                        dest[key].extend(source[key])
                else:
                    if not both_lists and not both_primitive:
                        _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list, basestring, int, float, long]')
                    # Source value overrides destination value.
                    dest[key] = copy.deepcopy(source[key])
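# A minimal usage sketch for ConfigurationLoader.update: 'parameters' sections
# are merged recursively while 'extensions' lists are concatenated.
def _update_example():
    dest = {'parameters': {'exp.framework': 'tensorflow'}, 'extensions': [{'a': '1'}]}
    ConfigurationLoader.update(dest, {'parameters': {'exp.gpus': '0'}, 'extensions': [{'b': '2'}]})
    return dest   # 'parameters' has both keys, 'extensions' has two items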
:param bool is_root:", "is None: return dictionary return dict((k, dictionary[k]) for k in keys if k", "particular item does not match, and *must_match* is True, *ConfigurationError* exception is thrown.", "remove_info(config): \"\"\"In parameter section of a **config** the function removes parameter info leaving", "parameter provided (it was automatically converted from its value).\" } else: param_info[name]['val'] =", "\"\"\"Merge **source** dictionary into **dest** dictionary assuming source and dest are JSON configuration", "**config** with info removed \"\"\" clean_config = copy.deepcopy(config) if 'parameters' in clean_config: params", "Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0", "does not contain key 'framework' OR contains\\ key 'framework' with value \"tensorflow\". match(dictionary,", "keys in 'keys' :param dict dictionary: Input dictionary. :param list_or_val keys: Keys to", ":param dict config: A dictionary with configuration section that may contain parameters, variables", "__init__(self, fname, mode='r'): self.__fname = fname self.__flags = ['rb', 'r'] if mode ==", "else: logging.warn( \" Parameter (%s) entirely redefines existing parameter (%s).\" \" Normally, only", "the License for the specific language governing permissions and # limitations under the", "if match or query is None :rtype: bool \"\"\" if query is None:", "check_file_extensions(fname, extensions): \"\"\"Checks that fname has one of the provided extensions. :param str", "query is None: return True assert policy in ['relaxed', 'strict'], \"\" for field,", "is not in destination dictionary. dest[key] = copy.deepcopy(source[key]) else: # The key from", "The file name is fixed and its value is `proc.pid`. :param float frequency:", "in `config` redefines existing parameters in param_info (already loaded params), program terminates. 2.", "sampling frequency in seconds. Can be something like 0.1 seconds \"\"\" self.launcher =", "definition: %s=%s\" % (name, val) ) params[name] = val['val'] return clean_config @staticmethod def", "subdirectories. :return: List of file names satisfying *file_name_pattern* pattern. \"\"\" if not recursively:", "not isinstance(source[key], schema['types']): raise ConfigurationError( \"Configuration update error - unexpected type of key", "to *dictionary*, that are in this list. Existing items in *dictionary* are overwritten", "key[len(prefix):] if remove_prefix else key return_dictionary[return_key] = copy.deepcopy(dictionary[key]) return return_dictionary @staticmethod def dump_json_to_file(dictionary,", "value == '': # Take special care if value is an empty string", "in dictionary: if key.startswith(prefix): return_key = key[len(prefix):] if remove_prefix else key return_dictionary[return_key] =", "is None: return None if check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) with OpenFile(fname, 'r') as", "dictionary: if isinstance(dictionary[key], list): dictionary[key] = separator.join(str(elem) for elem in dictionary[key]) @staticmethod def", "value 16 OR 32. :param dict dictionary: Dictionary to match. :param dict query:", "not in val: raise ConfigurationError( \"Parameter info update error.\" \" Parameter that is", "extensions) @staticmethod def read_json(fname, check_extension=False): \"\"\"Reads JSON object from file 'fname'. 
:param str", "a value is 'list', converts this list into a string using separator *separator*.", ") if 'type' not in param_info[name] or 'desc' not in param_info[name]: logging.warn( \"Parameter", "self.fields[field]['count'] if count == -1: metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp)) elif count == 0: metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)])", "in seconds. Can be something like 0.1 seconds :param multiprocessing.Queue queue: A queue", "optional python modules are available. \"\"\" @staticmethod def try_import(module_name): \"\"\"Tries to import module.", "'framework' OR contains\\ key 'framework' with value \"tensorflow\". match(dictionary, query = { \"framework\":", "params: val = params[name] if not is_user_config: # If this is not a", "JSON extension in **path** are loaded. :return: A tuple consisting of a list", "% (field)] = dictionary[field] else: if value == '': # Take special care", "be loaded for not a file (%s)\" % config_file) with open(config_file) as file_obj:", "# So, the type is expected. Warn if key value is suspicious -", "from the file. \"\"\" if fname is None: return None if check_extension: IOUtils.check_file_extensions(fname,", "group is considered as a key, and second group is considered to be", "(file_name, attempt) if not os.path.exists(candidate_file_name): return candidate_file_name attempt += 1 if attempt >=", "and start resource monitor in background thread. Due to possible execution of benchmarks", "update error.\" \" Parameter has invalid type = '%s'.\" \" Parameter definition is", "policy='strict') Match dictionary only if it (a) contains key 'framework' with value \"tensorflow\"", "dict dictionary: Dictionary to serialize. :param str file_name: Name of a file to", "str file_name: Input file name. :rtype: str :return: The 'file_name' if this file", "value).\" } else: param_info[name]['val'] = val # Do final validations if 'type' in", "files = [] for path_spec in path_specs: if os.path.isdir(path_spec): files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively)) elif", "or keys is None \"\"\" if keys is None: return True keys =", "files located in 'path'. If `files` is empty, all json files are loaded", "separator: An item separator. \"\"\" for key in dictionary: if isinstance(dictionary[key], list): dictionary[key]", "file names satisfying *file_name_pattern* pattern. \"\"\" if not recursively: files = [f for", "- unexpected type of key value: \" \" is_root=%s, key=%s, value type=%s, expected", "class to identify if optional python modules are available. \"\"\" @staticmethod def try_import(module_name):", "Merge data from this dictionary. :param bool is_root: True if **dest** and *source**", "must always match, else exception is thrown. if is_root: schema = {'types':(dict, list),", ":param dict dictionary: Input dictionary. :param list_or_val keys: Keys to find in dictionary", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "\"framework\": [\"tensorflow\", \"caffe2\"] }, policy='strict') Match dictionary only if it contains key 'framework'", "dictionary[key] = copy.deepcopy(default_value) @staticmethod def lists_to_strings(dictionary, separator=' '): \"\"\" Converts every value in", "and most likely should not be used. 
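Debugging sketch (the pid value is hypothetical):
    >>> monitor.write_pid_file(12345)   # monitor script starts tracking process 12345
The monitor polls this file; stop() later writes the 'exit' sentinel into the same
file to ask the monitoring loop to terminate.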
\"\"\" with open(self.pid_file, 'w') as fhandle:", "if key.startswith(prefix): return_key = key[len(prefix):] if remove_prefix else key return_dictionary[return_key] = copy.deepcopy(dictionary[key]) return", "may want to fix this.\", json.dumps(val), json.dumps(param_info[name]) ) param_info[name]['val'] = val['val'] # Existing", "self.pid_file, self.frequency, self.queue) ) self.monitor_process.start() def stop(self): \"\"\"Closes queue and waits for resource", "keys in. :param str prefix: Prefix of keys to be extracted. :param bool", "def monitor_function(launcher, pid_file, frequency, queue): \"\"\"A main monitor worker function. :param str launcher:", "instance, is can be '*.log' :param bool recursively: If True, search in subdirectories.", "\"tensorflow\" OR\\ \"caffe2\". match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"], \"batch\": [16, 32]", "\"\"\"Container for input/output helpers\"\"\" @staticmethod def mkdirf(file_name): \"\"\"Makes sure that parent folder of", "param_info[name]['type'] not in ('int', 'str', 'float', 'bool'): raise ConfigurationError( \"Parameter info update error.\"", "dictionary: Dictionary to check. :param str key: A key that must exist. :param", "len(value) > 0 else None if add_only_keys is None or key in add_only_keys:", "frequency self.queue = None self.monitor_process = None # Parse fields specs # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8:", "try: os.remove(self.pid_file) except OSError: pass def empty_pid_file(self): \"\"\"Empty pid file.\"\"\" try: with open(self.pid_file,", "bool is_root: True if **dest** and *source** are root configuration objects. False if", "if file_name is not None: IOUtils.mkdirf(file_name) with open(file_name, 'w') as file_obj: json.dump(dictionary, file_obj,", "Existing parameter from user configuration, update its value else: # Just parameter value", "str_val return v in ('true', 1, 'on') else: assert False, \"Invalid value type", "json.loads(value) if len(value) > 0 else None if add_only_keys is None or key", ":return: True if match or query is None :rtype: bool \"\"\" if query", "to finish.\"\"\" with open(self.pid_file, 'w') as fhandle: fhandle.write('exit') self.queue.close() self.queue.join_thread() self.monitor_process.join() self.remove_pid_file() class", "itself may not exist. A typical usage is to ensure that we can", "files] else: config_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.json')] config", "in this list. Existing items in *dictionary* are overwritten with new ones if", "that must not happen.\" \" Parameter %s=%s, new parameter definition (value) is %s\"", "you may want to fix this.\", json.dumps(val), json.dumps(param_info[name]) ) param_info[name]['val'] = val['val'] #", "# A part of global configuration from this particular file config_section = json.load(file_obj)", "data, check_extension=False): \"\"\" Dumps *dictionary* as a json object to a file with", "is expected. val_type = type(source[key]).__name__ if not isinstance(source[key], schema['types']): raise ConfigurationError( \"Configuration update", "= None # Parse fields specs # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8: self.fields = {} raw_fields =", "if not match: return False else: if matches is not None: matches['%s_0' %", "pass def write_pid_file(self, pid): \"\"\"Write the pid into pid file. 
:param int pid:", "%s=%s\" % (name, val) ) if name not in param_info: param_info[name] = copy.deepcopy(val)", "dest[key] = copy.deepcopy(source[key]) if both_lists else source[key] class ResourceMonitor(object): \"\"\"The class is responsible", "language governing permissions and # limitations under the License. \"\"\"Two classes are define", "into\\ *dictionary*. Others are ignored. :param boolean ignore_erros: If true, ignore errors. :raises", "info object. # TODO what about parameter type and description? else: logging.warn( \"", "from '%s' with pattern '%s'. Must match is set to true\" % (line,", "is None: return assert isinstance(extensions, tuple), \"The 'extensions' must be a tuple.\" if", "in val: raise ConfigurationError( \"Parameter info remove error.\" \" Parameter that is defined", "thrown. if is_root: schema = {'types':(dict, list), 'dict':['parameters', 'variables'], 'list':['extensions']} else: schema =", "'val' not in val: raise ConfigurationError( \"Parameter info update error.\" \" Parameter that", "is considered as a key, and second group is considered to be value.", "in string (%s)\" % str_val return v in ('true', 1, 'on') else: assert", "copy.deepcopy(source[key]) else: # The key from source is in dest. both_dicts = isinstance(dest[key],", "}, policy='strict') Match dictionary only if it contains key 'framework' with value \"tensorflow\".", "keys in dictionary and query is used for matching. It's assuemd we match", "types for standard parameters or induced automatically based on JSON parse result. \"\"\"", "is_root=False) else: dest[key].extend(source[key]) else: if not both_lists and not both_primitive: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]),", "is not None: matches['%s_0' % (field)] = dictionary[field] else: if value == '':", "key that must exist. :param obj default_value: Default value for key if it", "folder where pid file is created. The file name is fixed and its", "from *dictionary* which keys names starts with *prefix*. If *remove_prefix* is True, keys", "'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__ if name not in", "existing parameter (%s).\" \" Normally, only value needs to be provided.\" \" We", "in val: raise ConfigurationError( \"Parameter info update error.\" \" Parameter that is defined", "= 0 while True: candidate_file_name = \"%s.%d\" % (file_name, attempt) if not os.path.exists(candidate_file_name):", "its value is `proc.pid`. :param float frequency: A sampling frequency in seconds. Can", "Version 2.0 (the \"License\"); # you may not use this file except in", "configuration file. :return: A copy of **config** with info removed \"\"\" clean_config =", "file from pattern %s\" raise ValueError(msg % file_name) @staticmethod def check_file_extensions(fname, extensions): \"\"\"Checks", "this prefix. The dictionary *dictionary* is not modified. :param dict dictionary: Dictionary to", "that must exist. 
:param obj default_value: Default value for key if it does", "ConfigurationError( \"Parameter info update error.\" \" Parameter redefinition is not allowed for non-user", "ConfigurationError: If *must_match* is True and not match or if value\\ is not", "self.fields[field]['type'] idx = self.fields[field]['index'] count = self.fields[field]['count'] if count == -1: metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp))", "os.path.exists(file_name): return file_name attempt = 0 while True: candidate_file_name = \"%s.%d\" % (file_name,", "match = matcher.match(line) if not match: if must_match: raise ConfigurationError(\"Cannot match key-value from", "or key in add_only_keys: dictionary[key] = value logging.debug(\"Key-value item (%s=%s) has been parsed", "that file does not exist. \"\"\" if not os.path.exists(file_name): return file_name attempt =", "make sure\\ its parent directory exists. \"\"\" dir_name = os.path.dirname(file_name) if dir_name !=", "pattern for matching items in ``iterable``. :param bool must_match: Specifies if every element", "will proceed but you may want to fix this.\", json.dumps(val), json.dumps(param_info[name]) ) param_info[name]['val']", "\"\"\" files = [] for path_spec in path_specs: if os.path.isdir(path_spec): files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively))", "self.__flags[0]) else: self.__fobj = open(self.__fname, self.__flags[1]) return self.__fobj def __exit__(self, type, value, traceback):", "\"\"\" def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type, valid_types): raise ConfigurationError( \"Configuration update error - expecting", "support ``for something in iterable`` (list, opened file etc). Only those items in", "ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count) ]) return metrics def remove_pid_file(self): \"\"\"Deletes", "if module has been imported, False otherwise. \"\"\" have_module = True try: importlib.import_module(module_name)", "error. The 'path' parameter (%s) must point to an existing directory.\" % path)", "parameter definition with name, value and description. if 'val' not in val: raise", "None, all files with JSON extension in **path** are loaded. :return: A tuple", "source[key] class ResourceMonitor(object): \"\"\"The class is responsible for launching/shutting down/communicating with external resource", "can work with gzipped and regular textual files.\"\"\" def __init__(self, fname, mode='r'): self.__fname", "value is 'list', converts this list into a string using separator *separator*. The", "None: config_files = [os.path.join(path, f) for f in files] else: config_files = [os.path.join(path,", "# Existing parameter from user configuration, update its value else: # Just parameter", "Name of a file to serialie dictionary in. \"\"\" if fname is None:", "This is a system configuration error that must not happen.\" \" Parameter %s=%s,", "module to try to import, something like 'numpy', 'pandas', 'matplotlib' etc. :return: True", "= key[len(prefix):] if remove_prefix else key return_dictionary[return_key] = copy.deepcopy(dictionary[key]) return return_dictionary @staticmethod def", ":return: True if all keys are in dictionary or keys is None \"\"\"", "is a string printed out by a resource monitor # script. It's a", "this.\", json.dumps(val), json.dumps(param_info[name]) ) param_info[name]['val'] = val['val'] # Existing parameter from user configuration,", "for this parameter. 
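For example (the parameter name is illustrative), a user configuration may redefine
just the value, or provide a complete definition:
    "parameters": { "exp.framework": "tensorflow" }
    "parameters": { "exp.framework": { "val": "tensorflow", "type": "str",
                                       "desc": "A framework to benchmark." } }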
Types of user defined parameters are defined either by user", "ValueError(msg % file_name) @staticmethod def check_file_extensions(fname, extensions): \"\"\"Checks that fname has one of", "numbers and strings not lists or dictionaries. If values in query are lists,", "config_file) raise return (config_files, config, param_info) @staticmethod def update_param_info(param_info, config, is_user_config=False): \"\"\"Update parameter", "type of a value is 'list', converts this list into a string using", "'off'),\\ \"Invalid boolean value in string (%s)\" % str_val return v in ('true',", "which we want to make sure\\ its parent directory exists. \"\"\" dir_name =", "contains key 'framework' with value \"tensorflow\". match(dictionary, query = { \"framework\": \"tensorflow\" },", "with items from *dictionary* which keys names starts with *prefix*. If *remove_prefix* is", "deal with parameters in config that redefine parameters in existing param_info differently. See", "it info object. # TODO what about parameter type and description? else: logging.warn(", "= val['val'] # Existing parameter from user configuration, update its value else: #", "'pandas', 'matplotlib' etc. :return: True if module has been imported, False otherwise. \"\"\"", "in glob(os.path.join(directory, file_name_pattern))] else: files = [f for p in os.walk(directory) for f", "load configurations from :param list files: List of file names to load. If", "permissions and # limitations under the License. \"\"\"Two classes are define here :py:class:`dlbs.IOUtils`", "queue and start resource monitor in background thread. Due to possible execution of", "to check. :param str key: A key that must exist. :param obj default_value:", "matches: Dictionary where matches will be stored if match has been identified. :return:", "else: if value == '': # Take special care if value is an", "on params such as type and help messages for config_file in config_files: if", "list): dictionary[key] = separator.join(str(elem) for elem in dictionary[key]) @staticmethod def filter_by_key_prefix(dictionary, prefix, remove_prefix=True):", "'[dict, list]') if both_dicts: ConfigurationLoader.update(dest[key], source[key], is_root=False) else: dest[key].extend(source[key]) else: if not both_lists", "'': count = 0 else: count = int(fields_split[3]) self.fields[field_name] = { 'type': field_type,", "value \"tensorflow\". match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"] }, policy='strict') Match dictionary", "policy in ['relaxed', 'strict'], \"\" for field, value in query.iteritems(): if field not", "for key in dictionary: if key.startswith(prefix): return_key = key[len(prefix):] if remove_prefix else key", "in enumerate(match.groups()): matches['%s_%d' % (field, index+1)] = group continue return True class ConfigurationLoader(object):", "key/value pairs for key in keys. \"\"\" if keys is None: return dictionary", "\\ (str(is_root), key, val_type, str(schema['types'])) ) # So, the type is expected. Warn", "as fobj: return json.load(fobj) @staticmethod def write_json(fname, data, check_extension=False): \"\"\" Dumps *dictionary* as", "= \"%s.%d\" % (file_name, attempt) if not os.path.exists(candidate_file_name): return candidate_file_name attempt += 1", "OR 32. :param dict dictionary: Dictionary to match. :param dict query: Query to", "First group is considered as a key, and second group is considered to", "and description? 
else: logging.warn( \" Parameter (%s) entirely redefines existing parameter (%s).\" \"", "def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type, valid_types): raise ConfigurationError( \"Configuration update error - expecting value", "= [] for path_spec in path_specs: if os.path.isdir(path_spec): files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively)) elif os.path.isfile(path_spec):", "match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"], \"batch\": [16, 32] }, policy='strict') Match", "dict source: Merge data from this dictionary. :param bool is_root: True if **dest**", "= self.fields[field]['count'] if count == -1: metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp)) elif count == 0: metrics[field].append([ResourceMonitor.str_to_type(data[idx],", "if attempt >= max_attempts: msg = \"Cannot find non existing file from pattern", "in-place. :param obj iterable: Iterable object (list, opened file name etc). :param str", "must point to an existing directory.\" % path) if files is not None:", "\"\" for field, value in query.iteritems(): if field not in dictionary: if policy", "models and return boolean variable indicating if import has been succesfull or not.", "update(dest, source, is_root=True): \"\"\"Merge **source** dictionary into **dest** dictionary assuming source and dest", "'r' else ['wb', 'w'] def __enter__(self): if self.__fname.endswith('.gz'): self.__fobj = gzip.open(self.__fname, self.__flags[0]) else:", "file etc). Only those items in *iterable* are considered, that match *pattern* (it's", "user defined parameters are defined either by user in a standard way as", "f) for f in files] else: config_files = [os.path.join(path, f) for f in", "this method is used: 1. Load standard configuration. In this case, parameter redefinition", "was automatically converted from its value).\" } else: param_info[name]['val'] = val # Do", ":param list_or_val keys: Keys to extract :rtype: dict :return: Dictionary that contains key/value", "to use. :param ['relaxed', 'strict'] policy: Policy to match. :param dict matches: Dictionary", "in keys. \"\"\" if keys is None: return dictionary return dict((k, dictionary[k]) for", "value in string (%s)\" % str_val return v in ('true', 1, 'on') else:", "[f for f in glob(os.path.join(directory, file_name_pattern))] else: files = [f for p in", "source dictionary is not in destination dictionary. dest[key] = copy.deepcopy(source[key]) else: # The", "self.fields = {} raw_fields = fields_specs.split(',') for raw_field in raw_fields: fields_split = raw_field.split(':')", "folder. :param str path: Path to load configurations from :param list files: List", "list path_specs: A list of file names / directories. :param str file_name_pattern: A", "(field)] = dictionary[field] continue else: match = re.compile(value).match(dictionary[field]) if not match: return False", "field that\" \" defines its default value. Found this definition: %s=%s\" % (name,", "parameter value val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__", "not be available\", module_name) have_module = False return have_module class Modules(object): \"\"\"A class", "key value: \" \" is_root=%s, key=%s, value type=%s, expected type is one of", "can disable certain functionality if something is missing. \"\"\" HAVE_NUMPY = _ModuleImporter.try_import('numpy') HAVE_PANDAS", "Dictionary that contains key/value pairs for key in keys. \"\"\" if keys is", "from log files. 
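Sketch (the pattern and log line are illustrative, not a canonical log format):
    >>> params = {}
    >>> DictUtils.add(params, ['__exp.framework__="tensorflow"'],
    ...               pattern=r'^__(.+?)__=(.+)$')
    >>> params['exp.framework']
    'tensorflow'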
:param dict dictionary: Dictionary to update in-place. :param obj iterable:", "params/vars/extensions param_info = {} # Information on params such as type and help", "in *iterable* are considered, that match *pattern* (it's a regexp epression). If a", "functionality if something is missing. \"\"\" HAVE_NUMPY = _ModuleImporter.try_import('numpy') HAVE_PANDAS = _ModuleImporter.try_import('pandas') HAVE_MATPLOTLIB", "field_type = fields_split[1] assert field_type in ('str', 'int', 'float', 'bool'),\\ \"Invalid field type", "files with JSON extension in **path** are loaded. :return: A tuple consisting of", "overwritten with new ones if key already exists. One use case to use", "prefix in returned dictionary. :return: New dictionary with items which keys names start", "dictionary[field]: return False elif matches is not None: matches['%s_0' % (field)] = dictionary[field]", "is a system configuration. Based on this flag, we deal with parameters in", "= re.compile(pattern) for line in iterable: match = matcher.match(line) if not match: if", "to this dictionary. :param dict source: Merge data from this dictionary. :param bool", "str module_name: A name of a module to try to import, something like", "with key '%s' (key-value definition: '%s'). Error is %s\" % (value, key, line,", "'on') else: assert False, \"Invalid value type %s\" % val_type def get_measurements(self): \"\"\"Dequeue", "\"\"\"Return file name that does not exist. :param str file_name: Input file name.", "pid_folder: A full path to folder where pid file is created. The file", "is None :rtype: bool \"\"\" if query is None: return True assert policy", "try: importlib.import_module(module_name) except ImportError: logging.warn(\"Module '%s' cannot be imported, certain system information will", "def remove_pid_file(self): \"\"\"Deletes pif file from disk.\"\"\" try: os.remove(self.pid_file) except OSError: pass def", "in query must exist in dictionary with the same value to match. If", "used. \"\"\" with open(self.pid_file, 'w') as fhandle: fhandle.write('%d' % pid) def run(self): \"\"\"Create", "None self.monitor_process = None # Parse fields specs # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8: self.fields = {}", "contains key/value pairs for key in keys. \"\"\" if keys is None: return", "dictionary. :param list_or_val keys: Keys to find in dictionary :rtype: boolean :return: True", "\"\"\"Deletes pif file from disk.\"\"\" try: os.remove(self.pid_file) except OSError: pass def empty_pid_file(self): \"\"\"Empty", "series of its value. \"\"\" metrics = {} for key in self.fields.keys(): metrics[key]", "will not contain this prefix. The dictionary *dictionary* is not modified. :param dict", "{ \"framework\": \"tensorflow\" }, policy='strict') Match dictionary only if it contains key 'framework'", "is empty, all json files are loaded from that folder. This method fails", "is not a user-provided configuration, we disallow parameter redefinition. if name in param_info:", "if not os.path.isdir(path): raise ValueError(\"Configuration load error. The 'path' parameter (%s) must point", "of benchmarks in containers, we must not delete file here, but create or", "redefinition is prohibited. 
If `parameters` section in `config` redefines existing parameters in param_info", "fields specs # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8: self.fields = {} raw_fields = fields_specs.split(',') for raw_field in", "resource monitor is launched as a subprocess. The thread is reading its output", "subdirectories. Only used for entries in path_specs that are directories. :return: List of", "message, type, constraints etc. :param dict config: A dictionary with configuration section that", "'proc.pid') self.frequency = frequency self.queue = None self.monitor_process = None # Parse fields", ":param list path_specs: A list of file names / directories. :param str file_name_pattern:", "does not exist. \"\"\" if not os.path.exists(file_name): return file_name attempt = 0 while", "to load configurations from :param list files: List of file names to load.", "and help messages for config_file in config_files: if not os.path.isfile(config_file): raise ValueError(\"Configuration load", "Types of user defined parameters are defined either by user in a standard", "field specification (%s). Must be name:type:index, name:type:index: or name:type:index:count\" % raw_field field_name =", "query: Query to use. :param ['relaxed', 'strict'] policy: Policy to match. :param dict", "description ('desc').\" \" You should fix this. Parameter definition is\" \" %s =", "dump_json_to_file(dictionary, file_name): \"\"\" Dumps *dictionary* as a json object to a file with", "only value needs to be provided.\" \" We will proceed but you may", "(it's a regexp epression). If a particular item does not match, and *must_match*", "type (%s). Must be one of ('str', 'int', 'float', 'bool')\" % field_type index", "val_type == 'int': return int(str_val) elif val_type == 'float': return float(str_val) elif val_type", "type = '%s'.\" \" Parameter definition is %s = %s\" % (param_info[name]['type'], name,", "all keys from query to be matched. In this case, the intersection of", "== 'relaxed': continue else: return False if isinstance(value, list) or not isinstance(value, basestring):", "files.append(path_spec) return files @staticmethod def get_non_existing_file(file_name, max_attempts = 1000): \"\"\"Return file name that", "if k in dictionary) @staticmethod def contains(dictionary, keys): \"\"\"Checkes if dictionary contains all", "which keys names start with *prefix*. \"\"\" return_dictionary = {} for key in", "in file %s\", config_file) raise return (config_files, config, param_info) @staticmethod def update_param_info(param_info, config,", "while True: candidate_file_name = \"%s.%d\" % (file_name, attempt) if not os.path.exists(candidate_file_name): return candidate_file_name", "not in param_info: param_info[name] = { 'val': val, 'type': val_type, 'desc': \"No description", "self.__fobj def __exit__(self, type, value, traceback): self.__fobj.close() class IOUtils(object): \"\"\"Container for input/output helpers\"\"\"", "long)} for key in source: # Firstly, check that type of value is", "query must exist in dictionary with the same value to match. If policy", "info dictionary that maps parameter name to its description dictionary that contains such", "if name not in param_info: param_info[name] = copy.deepcopy(val) # New parameter, set it", "os.path.isfile(path_spec): files.append(path_spec) return files @staticmethod def get_non_existing_file(file_name, max_attempts = 1000): \"\"\"Return file name", "If True, remove prefix in returned dictionary. 
:return: New dictionary with items which", "parameter in `config` exists in param_info, it means user has provided their specific", "is True, scans subfolders as well. :param str directory: A directory to search", "definition: %s=%s\" % (name, val) ) if name not in param_info: param_info[name] =", "# Do final validations if 'type' in param_info[name] and param_info[name]['type'] not in ('int',", "parameter info structure, but deal with it in slightly different way. If parameter", "stop(self): \"\"\"Closes queue and waits for resource monitor to finish.\"\"\" with open(self.pid_file, 'w')", "f in os.listdir(path) if f.endswith('.json')] config = {} # Configuration with params/vars/extensions param_info", "else: assert False, \"Invalid value type %s\" % val_type def get_measurements(self): \"\"\"Dequeue all", "dictionary that maps parameter name to its description dictionary that contains such fileds", "method modifies/updates *dictionary* with items from *iterable* object. This object must support ``for", "IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) IOUtils.mkdirf(fname) with OpenFile(fname, 'w') as fobj: json.dump(data, fobj, indent=4) class", "A part of global configuration from this particular file config_section = json.load(file_obj) #", "but create or empty it in host OS. \"\"\" self.empty_pid_file() self.queue = Queue()", "match is set to true\" % (line, pattern)) else: continue key = match.group(1).strip()", "both_primitive: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list, basestring, int, float, long]') dest[key] = copy.deepcopy(source[key]) if", "OF ANY KIND, either express or implied. # See the License for the", "we deal with it in update_param_info, but just in case if 'val' not", "res shrd cpu mem power gpus_power \"\"\" def __init__(self, launcher, pid_folder, frequency, fields_specs):", "Prefix of keys to be extracted. :param bool remove_prefix: If True, remove prefix", "a file (%s)\" % config_file) with open(config_file) as file_obj: try: # A part", "'bool'): raise ConfigurationError( \"Parameter info update error.\" \" Parameter has invalid type =", "opened file etc). Only those items in *iterable* are considered, that match *pattern*", "`conigs`) folder. :param str path: Path to load configurations from :param list files:", "in host OS. \"\"\" self.empty_pid_file() self.queue = Queue() self.monitor_process = Process( target=ResourceMonitor.monitor_function, args=(self.launcher,", "dictionary and query is used for matching. It's assuemd we match primitive types", "False return True @staticmethod def ensure_exists(dictionary, key, default_value=None): \"\"\" Ensures that the dictionary", "in case if 'val' not in val: raise ConfigurationError( \"Parameter info remove error.\"", "a module to try to import, something like 'numpy', 'pandas', 'matplotlib' etc. :return:", "returned dictionary. :return: New dictionary with items which keys names start with *prefix*.", "key, and second group is considered to be value. Values must be a", "will not be available\", module_name) have_module = False return have_module class Modules(object): \"\"\"A", "of a list of config files, configuration object (dictionary) and dictionary of parameters", "work with gzipped and regular textual files.\"\"\" def __init__(self, fname, mode='r'): self.__fname =", "configuration. In this case, parameter redefinition is prohibited. 
If `parameters` section in `config`", "Parse fields specs # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8: self.fields = {} raw_fields = fields_specs.split(',') for raw_field", "isinstance(val, list) else type(val).__name__ if name not in param_info: param_info[name] = { 'val':", "is None \"\"\" if keys is None: return True keys = keys if", "configuration. If False, this is a system configuration. Based on this flag, we", "type %s\" % val_type def get_measurements(self): \"\"\"Dequeue all data, put it into lists", "open(self.pid_file, 'w') as fhandle: fhandle.write('exit') self.queue.close() self.queue.join_thread() self.monitor_process.join() self.remove_pid_file() class _ModuleImporter(object): \"\"\"A private", "applies. For instance: match(dictionary, query = { \"framework\": \"tensorflow\" }, policy='strict') Match dictionary", "configurationi in **config** :param dict param_info: A parameter info dictionary that maps parameter", "are members. \"\"\" def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type, valid_types): raise ConfigurationError( \"Configuration update error", "Parameter has invalid type = '%s'.\" \" Parameter definition is %s = %s\"", "The file itself may not exist. A typical usage is to ensure that", "to use this method is to populate a dictionary with key-values from log", "indent=4) class DictUtils(object): \"\"\"Container for dictionary helpers.\"\"\" @staticmethod def subdict(dictionary, keys): \"\"\"Return subdictionary", "then condition OR applies. For instance: match(dictionary, query = { \"framework\": \"tensorflow\" },", "*pattern*. If True and not match, raises exception. :param list add_only_keys: If not", "dictionary[key] = separator.join(str(elem) for elem in dictionary[key]) @staticmethod def filter_by_key_prefix(dictionary, prefix, remove_prefix=True): \"\"\"Creates", "matching. It's assuemd we match primitive types such as numbers and strings not", "tuple of extensions to use. Raises exception of fname does not end with", "generally happen since we deal with it in update_param_info, but just in case", "'list':['extensions']} else: schema = {'types':(list, basestring, int, float, long)} for key in source:", "if isinstance(value, list) else [value] if dictionary[field] not in values: return False if", "information will not be available\", module_name) have_module = False return have_module class Modules(object):", "elif os.path.isfile(path_spec): files.append(path_spec) return files @staticmethod def get_non_existing_file(file_name, max_attempts = 1000): \"\"\"Return file", "configuration. Based on this flag, we deal with parameters in config that redefine", "\" You should fix this. Parameter definition is\" \" %s = %s\", name,", "for :py:func:`os.makedirs` for more details. :param str file_name: A name of the file", "in dictionary: return False return True @staticmethod def ensure_exists(dictionary, key, default_value=None): \"\"\" Ensures", "dictionary if it does not contain key 'framework' OR contains\\ key 'framework' with", "Warn if key value is suspicious - we can do it only for", "files.\"\"\" @staticmethod def load(path, files=None): \"\"\"Loads configurations (normally in `conigs`) folder. :param str", "configuration from this single file. ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section)) except ValueError: logging.error(\"Configuration load error. Invalid", "parameter, set it info object. 
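# A complete user-provided definition is stored verbatim, e.g. (illustrative values):
#   param_info['exp.framework'] = {'val': 'tensorflow', 'type': 'str',
#                                  'desc': 'A framework to benchmark.'}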
# TODO what about parameter type and description?", "int(fields_split[3]) self.fields[field_name] = { 'type': field_type, 'index': index, 'count': count } @staticmethod def", "if fname is None: return None if check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) with OpenFile(fname,", "a time series of its value. \"\"\" metrics = {} for key in", "we want to make sure\\ its parent directory exists. \"\"\" dir_name = os.path.dirname(file_name)", "helpers.\"\"\" @staticmethod def subdict(dictionary, keys): \"\"\"Return subdictionary containing only keys from 'keys'. :param", "are two scenarios this method is used: 1. Load standard configuration. In this", "as a json object to a file with *file_name* name. :param dict dictionary:", "\"\"\" if 'parameters' not in config: return params = config['parameters'] for name in", "in. :param str prefix: Prefix of keys to be extracted. :param bool remove_prefix:", "system configuration. Based on this flag, we deal with parameters in config that", "for dictionary helpers.\"\"\" @staticmethod def subdict(dictionary, keys): \"\"\"Return subdictionary containing only keys from", "'%s' cannot be imported, certain system information will not be available\", module_name) have_module", "\"\"\" if path is None: raise ValueError(\"Configuration load error. The 'path' parameter cannot", "(%s). Must be one of %s\" % extensions) @staticmethod def read_json(fname, check_extension=False): \"\"\"Reads", "if isinstance(value, list) or not isinstance(value, basestring): values = value if isinstance(value, list)", "dump into a JSON file. :param str file_name: Name of a file to", "int pid: A pid to write. This is a debugging function and most", "Specifies if every element in *iterable* must match\\ *pattern*. If True and not", "The 'path' parameter cannot be None.\") if not os.path.isdir(path): raise ValueError(\"Configuration load error.", "and process.poll() is not None: break if output: # The 'output' is a", "that are directories. :return: List of file names satisfying *file_name_pattern* pattern. \"\"\" files", "Dumps *dictionary* as a json object to a file with *file_name* name. :param", "with OpenFile(fname, 'w') as fobj: json.dump(data, fobj, indent=4) class DictUtils(object): \"\"\"Container for dictionary", "empty, all json files are loaded from that folder. This method fails if", ":param str file_name: A name of the file for which we want to", "this flag, we deal with parameters in config that redefine parameters in existing", "'val' field that\" \" defines its default value. Found this definition: %s=%s\" %", "Must be one of ('str', 'int', 'float', 'bool')\" % field_type index = int(fields_split[2])", "is modified in-place. :param dict dictionary: Dictionary to modify. :param str separator: An", "execution of benchmarks in containers, we must not delete file here, but create", "with gzipped and regular textual files.\"\"\" def __init__(self, fname, mode='r'): self.__fname = fname", "- we can do it only for root. if is_root and key not", "None else JSON loaded from the file. \"\"\" if fname is None: return", "param_info[name] or 'desc' not in param_info[name]: logging.warn( \"Parameter definition does not contain type", "or name:type:index:count\" % raw_field field_name = fields_split[0] assert field_name not in self.fields,\\ \"Found", "files, configuration object (dictionary) and dictionary of parameters info This method loads configuration", "keys that are added into\\ *dictionary*. Others are ignored. 
:param boolean ignore_errors: If true, ignore errors. :raises ConfigurationError: If *must_match* is True and a line does not match, or if a value is not a json-parseable string.
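For instance (illustrative lines, assuming a pattern that captures __key__=value pairs),
'__batch__=16' parses to the integer 16, while '__framework__=tensorflow' fails: the
unquoted value is not valid JSON and must be written as '__framework__="tensorflow"'.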
\"\"\" matcher = re.compile(pattern) for line in iterable: match =", "@staticmethod def load(path, files=None): \"\"\"Loads configurations (normally in `conigs`) folder. :param str path:", "ValueError(\"Configuration load error. The 'path' parameter (%s) must point to an existing directory.\"", "is thrown. if is_root: schema = {'types':(dict, list), 'dict':['parameters', 'variables'], 'list':['extensions']} else: schema", "from 'keys'. :param dict dictionary: Input dictionary. :param list_or_val keys: Keys to extract", "boolean value in string (%s)\" % str_val return v in ('true', 1, 'on')", "(list, opened file name etc). :param str patter: A regexp pattern for matching", "search. Only used for entries in path_specs that are directories. :param bool recursively:", "is None: raise ValueError(\"File name is None\") if check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) IOUtils.mkdirf(fname)", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "'path' parameter cannot be None.\") if not os.path.isdir(path): raise ValueError(\"Configuration load error. The", "info structure, but deal with it in slightly different way. If parameter in", "\"Configuration update error - expecting value types to be same and one of", "must exist. :param obj default_value: Default value for key if it does not", "%s\" % extensions) @staticmethod def read_json(fname, check_extension=False): \"\"\"Reads JSON object from file 'fname'.", "is_user_config=False): \"\"\"Update parameter info dictionary based on configurationi in **config** :param dict param_info:", "A file name pattern to search. Only used for entries in path_specs that", "to write. This is a debugging function and most likely should not be", "(param_info[name]['type'], name, param_info[name]) ) if 'type' not in param_info[name] or 'desc' not in", "\"Invalid boolean value in string (%s)\" % str_val return v in ('true', 1,", "\" This is a system configuration error that must not happen.\" \" Parameter", "License. # You may obtain a copy of the License at # #", "values = value if isinstance(value, list) else [value] if dictionary[field] not in values:", "(str(is_root), key, val_type, str(schema['types'])) ) # So, the type is expected. Warn if", "for key in dictionary: if isinstance(dictionary[key], list): dictionary[key] = separator.join(str(elem) for elem in", "if name in param_info: raise ConfigurationError( \"Parameter info update error.\" \" Parameter redefinition", "pattern to search. For instance, is can be '*.log' :param bool recursively: If", "None or JSON object :return: None of fname is None else JSON loaded", "redefine parameters in existing param_info differently. See comments below. We are interested here", "name of the file for which we want to make sure\\ its parent", "is_user_config=False) # Joing configuration from this single file. ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section)) except ValueError: logging.error(\"Configuration", "self.launcher = launcher self.pid_file = os.path.join(pid_folder, 'proc.pid') self.frequency = frequency self.queue = None", "\" \" is_root=%s, key=%s, value type=%s, expected type is one of %s\" %", "config_files: if not os.path.isfile(config_file): raise ValueError(\"Configuration load error. Configuration data cannot be loaded", "param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'): raise ConfigurationError( \"Parameter info", "Joing configuration from this single file. 
ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section)) except ValueError: logging.error(\"Configuration load error.", "Based on this flag, we deal with parameters in config that redefine parameters", "virt res shrd cpu mem power gpus_power \"\"\" def __init__(self, launcher, pid_folder, frequency,", "Configuration data cannot be loaded for not a file (%s)\" % config_file) with", "can be '*.log' :param bool recursively: If True, search in subdirectories. :return: List", "external resource manager that monitors system resource consumption. proc_pid date virt res shrd", "succesfull or not. Used by a Modules class to identify if optional python", "specifies keys that are added into\\ *dictionary*. Others are ignored. :param boolean ignore_erros:", "removes parameter info leaving only their values :param dict config: A dictionary with", "should not generally happen since we deal with it in update_param_info, but just", "non existing file from pattern %s\" raise ValueError(msg % file_name) @staticmethod def check_file_extensions(fname,", "The dictionary *dictionary* is not modified. :param dict dictionary: Dictionary to search keys", "There are two scenarios this method is used: 1. Load standard configuration. In", "if val_type == 'str': return str_val elif val_type == 'int': return int(str_val) elif", "of a file to serialie dictionary in. \"\"\" if fname is None: raise", "Query to use. :param ['relaxed', 'strict'] policy: Policy to match. :param dict matches:", "in param_info (already loaded params), program terminates. 2. Load user-provided configuration. In this", "'type': field_type, 'index': index, 'count': count } @staticmethod def monitor_function(launcher, pid_file, frequency, queue):", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "and one of %s but\" \" Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)\" % (valid_types,", "in keys: if key not in dictionary: return False return True @staticmethod def", "\"caffe2\"] }, policy='strict') Match dictionary only if it contains key 'framework' with value", "error that must not happen.\" \" Parameter %s=%s, new parameter definition (value) is", "not exist. :param str file_name: Input file name. :rtype: str :return: The 'file_name'", "Dictionary to serialize. :param str file_name: Name of a file to serialie dictionary", "object from file 'fname'. :param str fname: File name. :param boolean check_extension: If", "ValueError(\"Invalid file extension (%s). Must be one of %s\" % extensions) @staticmethod def", "is list to strings. For every item in *dictionary*, if type of a", "for update_param_info method). \"\"\" if path is None: raise ValueError(\"Configuration load error. The", "contains(dictionary, keys): \"\"\"Checkes if dictionary contains all keys in 'keys' :param dict dictionary:", "with external resource manager that monitors system resource consumption. proc_pid date virt res", "loads configuration files located in 'path'. If `files` is empty, all json files", "len(fields_split) in (3, 4),\\ \"Invalid format of field specification (%s). 
Must be name:type:index,", "self.__fobj = open(self.__fname, self.__flags[1]) return self.__fobj def __exit__(self, type, value, traceback): self.__fobj.close() class", "'r'] if mode == 'r' else ['wb', 'w'] def __enter__(self): if self.__fname.endswith('.gz'): self.__fobj", "item (%s=%s) has been parsed and added to dictionary\", key, str(value)) except ValueError", "@staticmethod def contains(dictionary, keys): \"\"\"Checkes if dictionary contains all keys in 'keys' :param", "= os.path.dirname(file_name) if dir_name != '' and not os.path.isdir(dir_name): os.makedirs(dir_name) @staticmethod def find_files(directory,", "*dictionary* with items from *iterable* object. This method modifies/updates *dictionary* with items from", "query = { \"framework\": \"tensorflow\" }, policy='strict') Match dictionary only if it contains", "ValueError(\"File name is None\") if check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) IOUtils.mkdirf(fname) with OpenFile(fname, 'w')", "in config: return params = config['parameters'] for name in params: val = params[name]", "if both_dicts: ConfigurationLoader.update(dest[key], source[key], is_root=False) else: dest[key].extend(source[key]) else: if not both_lists and not", "has been parsed and added to dictionary\", key, str(value)) except ValueError as err:", "imported, False otherwise. \"\"\" have_module = True try: importlib.import_module(module_name) except ImportError: logging.warn(\"Module '%s'", "are loaded from that folder. This method fails if one parameter is defined", "both_primitive = type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring, int, float, long)) if is_root:", "copy.deepcopy(source[key]) if both_lists else source[key] class ResourceMonitor(object): \"\"\"The class is responsible for launching/shutting", "use. Raises exception of fname does not end with one of the extensions.", "list to strings. For every item in *dictionary*, if type of a value", "frequency in seconds. Can be something like 0.1 seconds \"\"\" self.launcher = launcher", "def load(path, files=None): \"\"\"Loads configurations (normally in `conigs`) folder. :param str path: Path", "file. If path to parent folder does not exist, it will be created.", "to strings. For every item in *dictionary*, if type of a value is", "prefix: Prefix of keys to be extracted. :param bool remove_prefix: If True, remove", ":param bool must_match: Specifies if every element in *iterable* must match\\ *pattern*. If", "we must not delete file here, but create or empty it in host", "re.compile(pattern) for line in iterable: match = matcher.match(line) if not match: if must_match:", "License, Version 2.0 (the \"License\"); # you may not use this file except", "dict): # This should not generally happen since we deal with it in", "The **config** is a result of parsing a JSON configuration file. :return: A", "thrown. Regexp pattern must return two groups (1 and 2). 
First group is", "indent=4) @staticmethod def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False): \"\"\" Updates *dictionary* with", "a system configuration error that must not happen.\" \" Parameter %s=%s, new parameter", "target=ResourceMonitor.monitor_function, args=(self.launcher, self.pid_file, self.frequency, self.queue) ) self.monitor_process.start() def stop(self): \"\"\"Closes queue and waits", "== -1: metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp)) elif count == 0: metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)]) else: metrics[field].append([ ResourceMonitor.str_to_type(data[index],", "configuration files located in 'path'. If `files` is empty, all json files are", "only those items are added to *dictionary*, that are in this list. Existing", ":param str key: A key that must exist. :param obj default_value: Default value", "of numbers. queue.put(output.strip()) @staticmethod def str_to_type(str_val, val_type): if val_type == 'str': return str_val", ":return: A copy of **config** with info removed \"\"\" clean_config = copy.deepcopy(config) if", "been imported, False otherwise. \"\"\" have_module = True try: importlib.import_module(module_name) except ImportError: logging.warn(\"Module", "files @staticmethod def gather_files(path_specs, file_name_pattern, recursively=False): \"\"\"Find/get files specified by an `inputs` parameter.", "case, the intersection of keys in dictionary and query is used for matching.", "as a key, and second group is considered to be value. Values must", "also applies for update_param_info method). \"\"\" if path is None: raise ValueError(\"Configuration load", "contain parameters, variables and extensions. The **config** is a result of parsing a", "metrics def remove_pid_file(self): \"\"\"Deletes pif file from disk.\"\"\" try: os.remove(self.pid_file) except OSError: pass", "args=(self.launcher, self.pid_file, self.frequency, self.queue) ) self.monitor_process.start() def stop(self): \"\"\"Closes queue and waits for", "or query is None :rtype: bool \"\"\" if query is None: return True", "names / directories. :param str file_name_pattern: A file name pattern to search. Only", "assert policy in ['relaxed', 'strict'], \"\" for field, value in query.iteritems(): if field", "for elem in dictionary[key]) @staticmethod def filter_by_key_prefix(dictionary, prefix, remove_prefix=True): \"\"\"Creates new dictionary with", ":param dict dictionary: Input dictionary. :param list_or_val keys: Keys to extract :rtype: dict", "most likely should not be used. \"\"\" with open(self.pid_file, 'w') as fhandle: fhandle.write('%d'", "invalid type = '%s'.\" \" Parameter definition is %s = %s\" % (param_info[name]['type'],", "'': # Take special care if value is an empty string if value", "dictionary[key] = value logging.debug(\"Key-value item (%s=%s) has been parsed and added to dictionary\",", "universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: output = process.stdout.readline() if output == '' and", "dict dictionary: Input dictionary. :param list_or_val keys: Keys to extract :rtype: dict :return:", "= { \"framework\": [\"tensorflow\", \"caffe2\"] }, policy='strict') Match dictionary only if it contains", "if len(fields_split) == 3: count = -1 elif fields_split[3] == '': count =", "(%s) must point to an existing directory.\" % path) if files is not", "file. 
ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section)) except ValueError: logging.error(\"Configuration load error. Invalid JSON configuration in file", "date virt res shrd cpu mem power gpus_power while not self.queue.empty(): data =", "config, param_info) @staticmethod def update_param_info(param_info, config, is_user_config=False): \"\"\"Update parameter info dictionary based on", "is_root: True if **dest** and *source** are root configuration objects. False if these", "to this file. If path to parent folder does not exist, it will", "file (%s)\" % config_file) with open(config_file) as file_obj: try: # A part of", "enumerate(match.groups()): matches['%s_%d' % (field, index+1)] = group continue return True class ConfigurationLoader(object): \"\"\"Loads", "dictionary: Dictionary to serialize. :param str file_name: Name of a file to serialie", "source: Merge data from this dictionary. :param bool is_root: True if **dest** and", "are directories. :return: List of file names satisfying *file_name_pattern* pattern. \"\"\" files =", "dictionary[field] for index, group in enumerate(match.groups()): matches['%s_%d' % (field, index+1)] = group continue", "<- Source(key=%s, val_type=%s)\" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__) ) # Types and", "to match. If policy is 'relaxed', dictionary may not contain all keys from", "Input dictionary. :param list_or_val keys: Keys to find in dictionary :rtype: boolean :return:", "recursively=False): \"\"\"Find/get files specified by an `inputs` parameter. :param list path_specs: A list", "directory: A directory to search files in. :param str file_name_pattern: A file name", "converted from its value).\" } else: param_info[name]['val'] = val # Do final validations", "4),\\ \"Invalid format of field specification (%s). Must be name:type:index, name:type:index: or name:type:index:count\"", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "config_file) with open(config_file) as file_obj: try: # A part of global configuration from", "list), 'dict':['parameters', 'variables'], 'list':['extensions']} else: schema = {'types':(list, basestring, int, float, long)} for", "class _ModuleImporter(object): \"\"\"A private class that imports a particular models and return boolean", "list) or not isinstance(value, basestring): values = value if isinstance(value, list) else [value]", "we still update parameter info structure, but deal with it in slightly different", "process.stdout.readline() if output == '' and process.poll() is not None: break if output:", "List of file names to load. If None, all files with JSON extension", "queue): \"\"\"A main monitor worker function. :param str launcher: A full path to", "list_or_val keys: Keys to find in dictionary :rtype: boolean :return: True if all", "update error.\" \" Parameter redefinition is not allowed for non-user configuration.\" \" This", "('true', 'false', '1', '0', 'on', 'off'),\\ \"Invalid boolean value in string (%s)\" %", "\"\"\" Dumps *dictionary* as a json object to a file with *file_name* name.", "file name that file does not exist. \"\"\" if not os.path.exists(file_name): return file_name", "not allowed for non-user configuration.\" \" This is a system configuration error that", "'w') as fhandle: fhandle.write('%d' % pid) def run(self): \"\"\"Create queue and start resource", "dictionary is not in destination dictionary. 
dest[key] = copy.deepcopy(source[key]) else: # The key", "'w'): pass except IOError: pass def write_pid_file(self, pid): \"\"\"Write the pid into pid", "A name of the file for which we want to make sure\\ its", "'%s' but expected is one of '%s'\", key, schema[val_type]) if key not in", "if value\\ is not a json-parseable string. \"\"\" matcher = re.compile(pattern) for line", "an empty string if value != dictionary[field]: return False elif matches is not", "value logging.debug(\"Key-value item (%s=%s) has been parsed and added to dictionary\", key, str(value))", "ConfigurationError( \"Configuration update error - unexpected type of key value: \" \" is_root=%s,", "keys if isinstance(keys, list) else [keys] for key in keys: if key not", "Keys to find in dictionary :rtype: boolean :return: True if all keys are", "a file with *file_name* name. :param dict dictionary: Dictionary to serialize. :param str", "keys from query to be matched. In this case, the intersection of keys", "subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: output = process.stdout.readline() if output == ''", "used for entries in path_specs that are directories. :param bool recursively: If True,", "isinstance(val, basestring) or isinstance(val, list) else type(val).__name__ if name not in param_info: param_info[name]", "['relaxed', 'strict'], \"\" for field, value in query.iteritems(): if field not in dictionary:", "in dictionary or keys is None \"\"\" if keys is None: return True", "if not os.path.exists(candidate_file_name): return candidate_file_name attempt += 1 if attempt >= max_attempts: msg", "a string printed out by a resource monitor # script. It's a whitespace", "self.queue.close() self.queue.join_thread() self.monitor_process.join() self.remove_pid_file() class _ModuleImporter(object): \"\"\"A private class that imports a particular", "as value, help message, type, constraints etc. :param dict config: A dictionary with", "parsing a JSON configuration file. :param bool is_user_config: If True, the config object", "val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__ if name", "'int': return int(str_val) elif val_type == 'float': return float(str_val) elif val_type == 'bool':", "key in source dictionary is not in destination dictionary. dest[key] = copy.deepcopy(source[key]) else:", "= fields_specs.split(',') for raw_field in raw_fields: fields_split = raw_field.split(':') assert len(fields_split) in (3,", ":param dict source: Merge data from this dictionary. :param bool is_root: True if", "file_name: A name of the file for which we want to make sure\\", "disable certain functionality if something is missing. 
\"\"\" HAVE_NUMPY = _ModuleImporter.try_import('numpy') HAVE_PANDAS =", "list]') if both_dicts: ConfigurationLoader.update(dest[key], source[key], is_root=False) else: dest[key].extend(source[key]) else: if not both_lists and", "else: # Just parameter value val_type = 'str' if isinstance(val, basestring) or isinstance(val,", "continue key = match.group(1).strip() try: value = match.group(2).strip() value = json.loads(value) if len(value)", "be same and one of %s but\" \" Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)\"", "(field, index+1)] = group continue return True class ConfigurationLoader(object): \"\"\"Loads experimenter configuration from", "def __exit__(self, type, value, traceback): self.__fobj.close() class IOUtils(object): \"\"\"Container for input/output helpers\"\"\" @staticmethod", "gpus_power while not self.queue.empty(): data = self.queue.get().strip().split() for field in self.fields: tp =", "% (field)] = dictionary[field] for index, group in enumerate(match.groups()): matches['%s_%d' % (field, index+1)]", "in subdirectories. :return: List of file names satisfying *file_name_pattern* pattern. \"\"\" if not", "else [value] if dictionary[field] not in values: return False if matches is not", "(%s=%s) has been parsed and added to dictionary\", key, str(value)) except ValueError as", "one of ('str', 'int', 'float', 'bool')\" % field_type index = int(fields_split[2]) if len(fields_split)", "extract :rtype: dict :return: Dictionary that contains key/value pairs for key in keys.", "= params[name] if not is_user_config: # If this is not a user-provided configuration,", "(name, str(param_info[name]), val) ) if isinstance(val, dict): # This is a complete parameter", "a tuple.\" if not fname.endswith(extensions): raise ValueError(\"Invalid file extension (%s). Must be one", "A queue to communicate measurements. A resource monitor is launched as a subprocess.", "matches['%s_0' % (field)] = dictionary[field] for index, group in enumerate(match.groups()): matches['%s_%d' % (field,", "matches is not None: matches['%s_0' % (field)] = dictionary[field] for index, group in", "You should fix this. Parameter definition is\" \" %s = %s\", name, param_info[name]", "or implied. # See the License for the specific language governing permissions and", "pattern. \"\"\" if not recursively: files = [f for f in glob(os.path.join(directory, file_name_pattern))]", "their values :param dict config: A dictionary with configuration section that may contain", "create queue and process. :param str launcher: A full path to resource monitor", "type is one of %s\" % \\ (str(is_root), key, val_type, str(schema['types'])) ) #", "raise ConfigurationError(\"Cannot match key-value from '%s' with pattern '%s'. Must match is set", "\" We will proceed but you may want to fix this.\", json.dumps(val), json.dumps(param_info[name])", "something like 0.1 seconds :param multiprocessing.Queue queue: A queue to communicate measurements. A", "not in values: return False if matches is not None: matches['%s_0' % (field)]", "out by a resource monitor # script. It's a whitespace separated string of", "*must_match* is True, *ConfigurationError* exception is thrown. Regexp pattern must return two groups", "existing directory.\" % path) if files is not None: config_files = [os.path.join(path, f)", "assert len(fields_split) in (3, 4),\\ \"Invalid format of field specification (%s). Must be", "in iterable`` (list, opened file etc). Only those items in *iterable* are considered,", "strings not lists or dictionaries. 
        If a value in the query is a list, an OR condition over its elements applies. For instance:
Found this definition: %s=%s\" % (name, val) ) if name not in", ":param obj default_value: Default value for key if it does not exist. \"\"\"", "License. \"\"\"Two classes are define here :py:class:`dlbs.IOUtils` and :py:class:`dlbs.DictUtils`. \"\"\" import os import", "multiprocessing import Process from multiprocessing import Queue from glob import glob from dlbs.exceptions", "that type of value is expected. val_type = type(source[key]).__name__ if not isinstance(source[key], schema['types']):", ":py:class:`dlbs.IOUtils` and :py:class:`dlbs.DictUtils`. \"\"\" import os import copy import json import gzip import", "import logging import subprocess import importlib from multiprocessing import Process from multiprocessing import", "clean_config['parameters'] for name in params: val = params[name] if isinstance(val, dict): # This", "None.\") if not os.path.isdir(path): raise ValueError(\"Configuration load error. The 'path' parameter (%s) must", "data from this dictionary. :param bool is_root: True if **dest** and *source** are", "True @staticmethod def ensure_exists(dictionary, key, default_value=None): \"\"\" Ensures that the dictionary *dictionary* contains", "*source** are root configuration objects. False if these objects are members. \"\"\" def", "is_root=%s, key=%s, value type=%s, expected type is one of %s\" % \\ (str(is_root),", "name in params: val = params[name] if isinstance(val, dict): # This should not", "is not None, only those items are added to *dictionary*, that are in", "'float', 'bool'): raise ConfigurationError( \"Parameter info update error.\" \" Parameter has invalid type", "launching/shutting down/communicating with external resource manager that monitors system resource consumption. proc_pid date", "use this file except in compliance with the License. # You may obtain", "json.load(fobj) @staticmethod def write_json(fname, data, check_extension=False): \"\"\" Dumps *dictionary* as a json object", "A full path to resource monitor script. :param str pid_folder: A full path", "are actually dictionaries. If policy is 'strict', every key in query must exist", "1. Load standard configuration. In this case, parameter redefinition is prohibited. If `parameters`", ":param bool recursively: If True, search in subdirectories. Only used for entries in", "('str', 'int', 'float', 'bool'),\\ \"Invalid field type (%s). Must be one of ('str',", "frequency in seconds. Can be something like 0.1 seconds :param multiprocessing.Queue queue: A", "names satisfying *file_name_pattern* pattern. \"\"\" if not recursively: files = [f for f", "Source(key=%s, val_type=%s)\" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__) ) # Types and expected", "'.json.gz')) IOUtils.mkdirf(fname) with OpenFile(fname, 'w') as fobj: json.dump(data, fobj, indent=4) class DictUtils(object): \"\"\"Container", "in `conigs`) folder. :param str path: Path to load configurations from :param list", "IOUtils(object): \"\"\"Container for input/output helpers\"\"\" @staticmethod def mkdirf(file_name): \"\"\"Makes sure that parent folder", "class that enumerates non-standard python modules this project depends on. They are optional,", "non-standard python modules this project depends on. They are optional, so we can", ":param ['relaxed', 'strict'] policy: Policy to match. :param dict matches: Dictionary where matches", "load(path, files=None): \"\"\"Loads configurations (normally in `conigs`) folder. 
        :param str path: Path to a directory to load configuration files from.
        Find files whose names match the *file_name_pattern* pattern in the folder *directory*.
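
        Example (illustrative; the directory path is hypothetical)::

            log_files = IOUtils.find_files('/tmp/experiments', '*.log', recursively=True)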
        For every item in *dictionary* whose value is a list, the list is converted into a single string using the separator *separator*.
\"\"\" import", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "with value 16 OR 32. :param dict dictionary: Dictionary to match. :param dict", "result of parsing a JSON configuration file. :param bool is_user_config: If True, the", "ConfigurationLoader(object): \"\"\"Loads experimenter configuration from multiple files.\"\"\" @staticmethod def load(path, files=None): \"\"\"Loads configurations", "are ignored. :param boolean ignore_erros: If true, ignore errors. :raises ConfigurationError: If *must_match*", "parse result. \"\"\" if 'parameters' not in config: return params = config['parameters'] for", "dictionary contains all keys in 'keys' :param dict dictionary: Input dictionary. :param list_or_val", "item separator. \"\"\" for key in dictionary: if isinstance(dictionary[key], list): dictionary[key] = separator.join(str(elem)", "items from *iterable* object. This object must support ``for something in iterable`` (list,", "pid to write. This is a debugging function and most likely should not", ":rtype: str :return: The 'file_name' if this file does not exist else find", "param_info: param_info[name] = { 'val': val, 'type': val_type, 'desc': \"No description for this", "for key in source: # Firstly, check that type of value is expected.", "\"\"\"Create queue and start resource monitor in background thread. Due to possible execution", "name that file does not exist. \"\"\" if not os.path.exists(file_name): return file_name attempt", "or not isinstance(value, basestring): values = value if isinstance(value, list) else [value] if", "are lists, then condition OR applies. For instance: match(dictionary, query = { \"framework\":", "file to serialie dictionary in. \"\"\" if file_name is not None: IOUtils.mkdirf(file_name) with", "disk.\"\"\" try: os.remove(self.pid_file) except OSError: pass def empty_pid_file(self): \"\"\"Empty pid file.\"\"\" try: with", "try: with open(self.pid_file, 'w'): pass except IOError: pass def write_pid_file(self, pid): \"\"\"Write the", "'bool': v = str_val.lower() assert v in ('true', 'false', '1', '0', 'on', 'off'),\\", "raise ValueError(\"File name is None\") if check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) IOUtils.mkdirf(fname) with OpenFile(fname,", "If *remove_prefix* is True, keys in new dictionary will not contain this prefix.", "to dictionary\", key, str(value)) except ValueError as err: if not ignore_errors: raise ConfigurationError(\"Cannot", "source, is_root=True): \"\"\"Merge **source** dictionary into **dest** dictionary assuming source and dest are", "= raw_field.split(':') assert len(fields_split) in (3, 4),\\ \"Invalid format of field specification (%s).", "dictionary assuming source and dest are JSON configuration configs or their members. :param", "if value == '': # Take special care if value is an empty", "'%s'.\" \" Parameter definition is %s = %s\" % (param_info[name]['type'], name, param_info[name]) )", "a result of parsing a JSON configuration file. :return: A copy of **config**", "from this dictionary. :param bool is_root: True if **dest** and *source** are root", "Firstly, check that type of value is expected. val_type = type(source[key]).__name__ if not", "[keys] for key in keys: if key not in dictionary: return False return", "% path) if files is not None: config_files = [os.path.join(path, f) for f", "with *prefix*. If *remove_prefix* is True, keys in new dictionary will not contain", "parameters or induced automatically based on JSON parse result. 
\"\"\" if 'parameters' not", "value\\ is not a json-parseable string. \"\"\" matcher = re.compile(pattern) for line in", "group in enumerate(match.groups()): matches['%s_%d' % (field, index+1)] = group continue return True class", "is True, keys in new dictionary will not contain this prefix. The dictionary", ":return: List of file names satisfying *file_name_pattern* pattern. \"\"\" files = [] for", "object (list, opened file name etc). :param str patter: A regexp pattern for", "with the License. # You may obtain a copy of the License at", "return False else: if matches is not None: matches['%s_0' % (field)] = dictionary[field]", "to populate a dictionary with key-values from log files. :param dict dictionary: Dictionary", "matcher.match(line) if not match: if must_match: raise ConfigurationError(\"Cannot match key-value from '%s' with", "else: match = re.compile(value).match(dictionary[field]) if not match: return False else: if matches is", "return (config_files, config, param_info) @staticmethod def update_param_info(param_info, config, is_user_config=False): \"\"\"Update parameter info dictionary", "to be extracted. :param bool remove_prefix: If True, remove prefix in returned dictionary.", "else: metrics[field].append([ ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count) ]) return metrics def", "return False if matches is not None: matches['%s_0' % (field)] = dictionary[field] else:", "definition (value) is %s\" % (name, str(param_info[name]), val) ) if isinstance(val, dict): #", "used for matching. It's assuemd we match primitive types such as numbers and", "if value != dictionary[field]: return False elif matches is not None: matches['%s_0' %", "in dest: # The key in source dictionary is not in destination dictionary.", "is reading its output and will put the data into a queue. A", "in path_specs: if os.path.isdir(path_spec): files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively)) elif os.path.isfile(path_spec): files.append(path_spec) return files @staticmethod", "to be same and one of %s but\" \" Dest(key=%s, val_type=%s) <- Source(key=%s,", "entries in path_specs that are directories. :param bool recursively: If True, search in", "on JSON parse result. \"\"\" if 'parameters' not in config: return params =", "pattern %s\" raise ValueError(msg % file_name) @staticmethod def check_file_extensions(fname, extensions): \"\"\"Checks that fname", "of the provided extensions. :param str fname: The file name to check. :param", "type and description? else: logging.warn( \" Parameter (%s) entirely redefines existing parameter (%s).\"", "\"Parameter definition does not contain type ('type') and/or description ('desc').\" \" You should", "does not exist, it adds a new item with value *default_value*. The dictionary", "be None.\") if not os.path.isdir(path): raise ValueError(\"Configuration load error. The 'path' parameter (%s)", "= '%s'.\" \" Parameter definition is %s = %s\" % (param_info[name]['type'], name, param_info[name])", "matched. In this case, the intersection of keys in dictionary and query is", "dest are JSON configuration configs or their members. :param dict dest: Merge data", "extensions. :param str fname: The file name to check. 
        :param tuple extensions: A tuple of extensions to check against. An exception is raised if *fname* does not end with one of these extensions.
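
        Example (illustrative)::

            IOUtils.check_file_extensions('results.json.gz', ('.json', '.json.gz'))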
:param dict source: Merge", "val: raise ConfigurationError( \"Parameter info remove error.\" \" Parameter that is defined by", ":param str launcher: A full path to resource monitor script. :param str pid_folder:", "ConfigurationError( \"Configuration update error - expecting value types to be same and one", "all keys in 'keys' :param dict dictionary: Input dictionary. :param list_or_val keys: Keys", "ValueError as err: if not ignore_errors: raise ConfigurationError(\"Cannot parse JSON string '%s' with", "expected is one of '%s'\", key, schema[val_type]) if key not in dest: #", "update error - expecting value types to be same and one of %s", "value types to be same and one of %s but\" \" Dest(key=%s, val_type=%s)", "self.fields.keys(): metrics[key] = [] # What's in output: # proc_pid date virt res", "dict :return: Dictionary that contains key/value pairs for key in keys. \"\"\" if", "directory to search files in. :param str file_name_pattern: A file name pattern to", "its default value. Found this definition: %s=%s\" % (name, val) ) if name", "this. Parameter definition is\" \" %s = %s\", name, param_info[name] ) @staticmethod def", "in dictionary) @staticmethod def contains(dictionary, keys): \"\"\"Checkes if dictionary contains all keys in", "in compliance with the License. # You may obtain a copy of the", "can write to this file. If path to parent folder does not exist,", "with info removed \"\"\" clean_config = copy.deepcopy(config) if 'parameters' in clean_config: params =", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "tuple.\" if not fname.endswith(extensions): raise ValueError(\"Invalid file extension (%s). Must be one of", "cpu mem power gpus_power while not self.queue.empty(): data = self.queue.get().strip().split() for field in", "importlib.import_module(module_name) except ImportError: logging.warn(\"Module '%s' cannot be imported, certain system information will not", "str directory: A directory to search files in. :param str file_name_pattern: A file", "experimenter configuration from multiple files.\"\"\" @staticmethod def load(path, files=None): \"\"\"Loads configurations (normally in", "f.endswith('.json')] config = {} # Configuration with params/vars/extensions param_info = {} # Information", "check. :param str key: A key that must exist. :param obj default_value: Default", "are in dictionary or keys is None \"\"\" if keys is None: return", ":param dict dictionary: Dictionary to check. :param str key: A key that must", "items which keys start with *prefix*. Creates new dictionary with items from *dictionary*", "return_dictionary = {} for key in dictionary: if key.startswith(prefix): return_key = key[len(prefix):] if", "def update(dest, source, is_root=True): \"\"\"Merge **source** dictionary into **dest** dictionary assuming source and", "*iterable* object. This object must support ``for something in iterable`` (list, opened file", "list) and isinstance(source[key], list) both_primitive = type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring, int,", "key: A key that must exist. :param obj default_value: Default value for key", "the intersection of keys in dictionary and query is used for matching. It's", "if 'val' not in val: raise ConfigurationError( \"Parameter info update error.\" \" Parameter", "with new ones if key already exists. One use case to use this", "iterable: Iterable object (list, opened file name etc). :param str patter: A regexp", "exist. 
\"\"\" if key not in dictionary: dictionary[key] = copy.deepcopy(default_value) @staticmethod def lists_to_strings(dictionary,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "policy='strict') Match dictionary only if it contains key 'framework' with value \"tensorflow\" OR\\", "Process from multiprocessing import Queue from glob import glob from dlbs.exceptions import ConfigurationError", "boolean variable indicating if import has been succesfull or not. Used by a", "parsed and added to dictionary\", key, str(value)) except ValueError as err: if not", "of fname is None else JSON loaded from the file. \"\"\" if fname", "key names. Types must always match, else exception is thrown. if is_root: schema", "deal with it in slightly different way. If parameter in `config` exists in", "dictionary with items from *dictionary* which keys names starts with *prefix*. If *remove_prefix*", "\"\"\" have_module = True try: importlib.import_module(module_name) except ImportError: logging.warn(\"Module '%s' cannot be imported,", "dictionary or keys is None \"\"\" if keys is None: return True keys", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "value for this parameter. Types of user defined parameters are defined either by", "configuration configs or their members. :param dict dest: Merge data to this dictionary.", "{'types':(list, basestring, int, float, long)} for key in source: # Firstly, check that", "self.monitor_process = None # Parse fields specs # time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8: self.fields = {} raw_fields", "\"\"\"In parameter section of a **config** the function removes parameter info leaving only", "launcher, pid_folder, frequency, fields_specs): \"\"\"Initializes resource monitor but does not create queue and", "data to dump into a JSON file. :param str file_name: Name of a", "fails if one parameter is defined in multiple files. This is intended behaviour", "search. For instance, is can be '*.log' :param bool recursively: If True, search", "it will be created. See documentation for :py:func:`os.makedirs` for more details. :param str", "if one parameter is defined in multiple files. This is intended behaviour for", "not a file (%s)\" % config_file) with open(config_file) as file_obj: try: # A", "return clean_config @staticmethod def update(dest, source, is_root=True): \"\"\"Merge **source** dictionary into **dest** dictionary", "that monitors system resource consumption. proc_pid date virt res shrd cpu mem power", "params: val = params[name] if isinstance(val, dict): # This should not generally happen", "% pid) def run(self): \"\"\"Create queue and start resource monitor in background thread.", "a complete parameter definition with name, value and description. if 'val' not in", "A directory to search files in. :param str file_name_pattern: A file name pattern", "to dump into a JSON file. :param str file_name: Name of a file", "index, group in enumerate(match.groups()): matches['%s_%d' % (field, index+1)] = group continue return True", "its parent directory exists. \"\"\" dir_name = os.path.dirname(file_name) if dir_name != '' and", "(%s). Must be name:type:index, name:type:index: or name:type:index:count\" % raw_field field_name = fields_split[0] assert", "recursively: If True, search in subdirectories. 
        :return: A list of file names matching the *file_name_pattern* pattern.
Must be one of ('str', 'int', 'float', 'bool')\" % field_type index =", "but expected is one of '%s'\", key, schema[val_type]) if key not in dest:", "if fname is None: return assert isinstance(extensions, tuple), \"The 'extensions' must be a", "(basestring, int, float, long)) if is_root: if not both_dicts and not both_lists: _raise_types_mismatch_config_error(key,", "Configuration with params/vars/extensions param_info = {} # Information on params such as type", "both_lists: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict, list]') if both_dicts: ConfigurationLoader.update(dest[key], source[key], is_root=False) else: dest[key].extend(source[key])", "source[key], is_root=False) else: dest[key].extend(source[key]) else: if not both_lists and not both_primitive: _raise_types_mismatch_config_error(key, type(dest[key]),", "if path is None: raise ValueError(\"Configuration load error. The 'path' parameter cannot be", "contains such fileds as value, help message, type, constraints etc. :param dict config:", "consumption. proc_pid date virt res shrd cpu mem power gpus_power \"\"\" def __init__(self,", "members. \"\"\" def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type, valid_types): raise ConfigurationError( \"Configuration update error -", "the License. \"\"\"Two classes are define here :py:class:`dlbs.IOUtils` and :py:class:`dlbs.DictUtils`. \"\"\" import os", "`inputs` parameter. :param list path_specs: A list of file names / directories. :param", "``for something in iterable`` (list, opened file etc). Only those items in *iterable*", "is None: return True assert policy in ['relaxed', 'strict'], \"\" for field, value", "or 'desc' not in param_info[name]: logging.warn( \"Parameter definition does not contain type ('type')", "= copy.deepcopy(default_value) @staticmethod def lists_to_strings(dictionary, separator=' '): \"\"\" Converts every value in dictionary", "monitor to finish.\"\"\" with open(self.pid_file, 'w') as fhandle: fhandle.write('exit') self.queue.close() self.queue.join_thread() self.monitor_process.join() self.remove_pid_file()", "in subdirectories. Only used for entries in path_specs that are directories. :return: List", "extensions. The **config** is a result of parsing a JSON configuration file. :return:", "of file names satisfying *file_name_pattern* pattern. \"\"\" files = [] for path_spec in", "virt res shrd cpu mem power gpus_power while not self.queue.empty(): data = self.queue.get().strip().split()", "value and description. if 'val' not in val: raise ConfigurationError( \"Parameter info update", "if os.path.isdir(path_spec): files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively)) elif os.path.isfile(path_spec): files.append(path_spec) return files @staticmethod def get_non_existing_file(file_name,", "logging import subprocess import importlib from multiprocessing import Process from multiprocessing import Queue", "in (3, 4),\\ \"Invalid format of field specification (%s). Must be name:type:index, name:type:index:", "keys start with *prefix*. Creates new dictionary with items from *dictionary* which keys", "not match, raises exception. 
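            # Positional layout of a field spec, as parsed here and consumed in
            # get_measurements(): 'name:type:index[:count]'. A missing fourth
            # component means one scalar per sample; an empty fourth component
            # ('name:type:index:') means "consume values to the end of the
            # line"; a positive integer means a fixed-length list. For example,
            # a hypothetical spec 'gpus:float:8:2' would read two values
            # starting at column 8 of each monitor output line.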
        :param list add_only_keys: If not None, only keys from this list are added to *dictionary*; all other keys are ignored.
        This object must support ``for something in iterable`` (e.g. a list or an opened file). Only items that match *pattern* (a regular expression) are considered.
        :return: List of file names satisfying *file_name_pattern* pattern.
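
        Example (an illustrative sketch; the directory path is hypothetical)::

            # Recursively collect every '*.log' file under a results folder.
            log_files = IOUtils.find_files('/tmp/results', '*.log', recursively=True)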
:param str key: A key that must exist.", "v in ('true', 'false', '1', '0', 'on', 'off'),\\ \"Invalid boolean value in string", "ensure that we can write to this file. If path to parent folder", "def filter_by_key_prefix(dictionary, prefix, remove_prefix=True): \"\"\"Creates new dictionary with items which keys start with", "(value) is %s\" % (name, str(param_info[name]), val) ) if isinstance(val, dict): # This", "frequency: A sampling frequency in seconds. Can be something like 0.1 seconds \"\"\"", "= True try: importlib.import_module(module_name) except ImportError: logging.warn(\"Module '%s' cannot be imported, certain system", "*remove_prefix* is True, keys in new dictionary will not contain this prefix. The", "ConfigurationError( \"Parameter info update error.\" \" Parameter has invalid type = '%s'.\" \"", "str key: A key that must exist. :param obj default_value: Default value for", "ConfigurationError( \"Parameter info update error.\" \" Parameter that is defined by a dictionary", "metrics[field].append([ ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count) ]) return metrics def remove_pid_file(self):", "file %s\", config_file) raise return (config_files, config, param_info) @staticmethod def update_param_info(param_info, config, is_user_config=False):", "separator. \"\"\" for key in dictionary: if isinstance(dictionary[key], list): dictionary[key] = separator.join(str(elem) for", "a particular item does not match, and *must_match* is True, *ConfigurationError* exception is", "type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring, int, float, long)) if is_root: if not", "will be stored if match has been identified. :return: True if match or", "item in *dictionary*, if type of a value is 'list', converts this list", "launched as a subprocess. The thread is reading its output and will put", "else: param_info[name]['val'] = val # Do final validations if 'type' in param_info[name] and", "if key not in dest: # The key in source dictionary is not", "from multiple files.\"\"\" @staticmethod def load(path, files=None): \"\"\"Loads configurations (normally in `conigs`) folder.", "val # Do final validations if 'type' in param_info[name] and param_info[name]['type'] not in", ") self.monitor_process.start() def stop(self): \"\"\"Closes queue and waits for resource monitor to finish.\"\"\"", "case, parameter redefinition is prohibited. If `parameters` section in `config` redefines existing parameters", "For every item in *dictionary*, if type of a value is 'list', converts", "('.json', '.json.gz')) with OpenFile(fname, 'r') as fobj: return json.load(fobj) @staticmethod def write_json(fname, data,", "main thread will then dequeue all data at once once experiment is completed.", "> 0 else None if add_only_keys is None or key in add_only_keys: dictionary[key]", "(3, 4),\\ \"Invalid format of field specification (%s). Must be name:type:index, name:type:index: or", "{} # Information on params such as type and help messages for config_file", "fname does not end with '.json' or '.json.gz'. :rtype: None or JSON object", "parameter. :param list path_specs: A list of file names / directories. 
        :param str file_name_pattern: A file name pattern to search. Only used
                                      for entries in path_specs that are directories.
:param obj default_value: Default value for key if it does not exist.", "in param_info: raise ConfigurationError( \"Parameter info update error.\" \" Parameter redefinition is not", "Process( target=ResourceMonitor.monitor_function, args=(self.launcher, self.pid_file, self.frequency, self.queue) ) self.monitor_process.start() def stop(self): \"\"\"Closes queue and", "as we define types for standard parameters or induced automatically based on JSON", "self.monitor_process.join() self.remove_pid_file() class _ModuleImporter(object): \"\"\"A private class that imports a particular models and", "them. time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8- :return: Dictionary that maps metric field to a time series of", "New dictionary with items which keys names start with *prefix*. \"\"\" return_dictionary =", "self.__fobj.close() class IOUtils(object): \"\"\"Container for input/output helpers\"\"\" @staticmethod def mkdirf(file_name): \"\"\"Makes sure that", "configuration from this particular file config_section = json.load(file_obj) # Update parameters info. ConfigurationLoader.update_param_info(param_info,", "name:type:index:count\" % raw_field field_name = fields_split[0] assert field_name not in self.fields,\\ \"Found duplicate", "tuple consisting of a list of config files, configuration object (dictionary) and dictionary", "way. If parameter in `config` exists in param_info, it means user has provided", "key is '%s' but expected is one of '%s'\", key, schema[val_type]) if key", ":py:func:`os.makedirs` for more details. :param str file_name: A name of the file for", "If *add_only_keys* is not None, only those items are added to *dictionary*, that", "not lists or dictionaries. If values in query are lists, then condition OR", "'%s' with key '%s' (key-value definition: '%s'). Error is %s\" % (value, key,", "is %s = %s\" % (param_info[name]['type'], name, param_info[name]) ) if 'type' not in", "\"caffe2\". match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"], \"batch\": [16, 32] }, policy='strict')", "from pattern %s\" raise ValueError(msg % file_name) @staticmethod def check_file_extensions(fname, extensions): \"\"\"Checks that", "\"No description for this parameter provided (it was automatically converted from its value).\"", "all keys are in dictionary or keys is None \"\"\" if keys is", "so we can disable certain functionality if something is missing. \"\"\" HAVE_NUMPY =", "return candidate_file_name attempt += 1 if attempt >= max_attempts: msg = \"Cannot find", ":param str fname: The file name to check. :param tuple extensions: A tuple", "str fname: The file name to check. :param tuple extensions: A tuple of", "= {} for key in self.fields.keys(): metrics[key] = [] # What's in output:", "It's assuemd we match primitive types such as numbers and strings not lists", "not None: matches['%s_0' % (field)] = dictionary[field] else: if value == '': #", "self.fields,\\ \"Found duplicate timeseries field (%s)\" % field_name field_type = fields_split[1] assert field_type", "to identify if optional python modules are available. 
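
        Example (a minimal sketch; the key and default value are illustrative)::

            params = {'exp.framework': 'tensorflow'}
            DictUtils.ensure_exists(params, 'exp.num_warmup_batches', 1)
            # 'exp.num_warmup_batches' is now present with its default value 1.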
\"\"\" @staticmethod def try_import(module_name): \"\"\"Tries", ") if name not in param_info: param_info[name] = copy.deepcopy(val) # New parameter, set", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "None: return assert isinstance(extensions, tuple), \"The 'extensions' must be a tuple.\" if not", "Dest(key=%s, val_type=%s) <- Source(key=%s, val_type=%s)\" % (valid_types, key, dest_val_type.__name__, key, src_val_type.__name__) ) #", "slightly different way. If parameter in `config` exists in param_info, it means user", "'file_name' if this file does not exist else find first file name that", "to communicate measurements. A resource monitor is launched as a subprocess. The thread", "the file. \"\"\" if fname is None: return None if check_extension: IOUtils.check_file_extensions(fname, ('.json',", "and not both_primitive: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list, basestring, int, float, long]') dest[key] =", "= str_val.lower() assert v in ('true', 'false', '1', '0', 'on', 'off'),\\ \"Invalid boolean", "keys names start with *prefix*. \"\"\" return_dictionary = {} for key in dictionary:", "from *iterable* object. This object must support ``for something in iterable`` (list, opened", "%s\", name, param_info[name] ) @staticmethod def remove_info(config): \"\"\"In parameter section of a **config**", "pid) def run(self): \"\"\"Create queue and start resource monitor in background thread. Due", "file exists. The file itself may not exist. A typical usage is to", "recursively. Find files which names satisfy *file_name_pattern* pattern in folder *directory*. If *recursively*", "query is None :rtype: bool \"\"\" if query is None: return True assert", "dictionary only if it (a) contains key 'framework' with value \"tensorflow\" OR \"caffe2\"\\", "not happen.\" \" Parameter %s=%s, new parameter definition (value) is %s\" % (name,", "complete parameter definition with name, value and description. if 'val' not in val:", "only their values :param dict config: A dictionary with configuration section that may", "val_type == 'float': return float(str_val) elif val_type == 'bool': v = str_val.lower() assert", "fname self.__flags = ['rb', 'r'] if mode == 'r' else ['wb', 'w'] def", "in iterable: match = matcher.match(line) if not match: if must_match: raise ConfigurationError(\"Cannot match", "If true, ignore errors. :raises ConfigurationError: If *must_match* is True and not match", "that\" \" defines its default value. Found this definition: %s=%s\" % (name, val)", "%s\" % \\ (str(is_root), key, val_type, str(schema['types'])) ) # So, the type is", "If *recursively* is True, scans subfolders as well. :param str directory: A directory", "parameters info. ConfigurationLoader.update_param_info(param_info, config_section, is_user_config=False) # Joing configuration from this single file. ConfigurationLoader.update(config,", "Parameter (%s) entirely redefines existing parameter (%s).\" \" Normally, only value needs to", "this particular file config_section = json.load(file_obj) # Update parameters info. ConfigurationLoader.update_param_info(param_info, config_section, is_user_config=False)", "try_import(module_name): \"\"\"Tries to import module. :param str module_name: A name of a module", "== 'str': return str_val elif val_type == 'int': return int(str_val) elif val_type ==", "python modules this project depends on. 
    They are optional, so we can disable certain functionality if something is missing.
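
    Example (an illustrative sketch; HAVE_NUMPY is one of the flags defined below)::

        if Modules.HAVE_NUMPY:
            import numpy as np  # Safe to import: availability was checked above.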
\"\"\" import os import copy import json import gzip import re import", "not in config: return params = config['parameters'] for name in params: val =", "then dequeue all data at once once experiment is completed. \"\"\" cmd =", "recursively)) elif os.path.isfile(path_spec): files.append(path_spec) return files @staticmethod def get_non_existing_file(file_name, max_attempts = 1000): \"\"\"Return", "# Update parameters info. ConfigurationLoader.update_param_info(param_info, config_section, is_user_config=False) # Joing configuration from this single", "pass def empty_pid_file(self): \"\"\"Empty pid file.\"\"\" try: with open(self.pid_file, 'w'): pass except IOError:", "of file names / directories. :param str file_name_pattern: A file name pattern to", "open(self.pid_file, 'w'): pass except IOError: pass def write_pid_file(self, pid): \"\"\"Write the pid into", "existing file from pattern %s\" raise ValueError(msg % file_name) @staticmethod def check_file_extensions(fname, extensions):", "field, value in query.iteritems(): if field not in dictionary: if policy == 'relaxed':", "input/output helpers\"\"\" @staticmethod def mkdirf(file_name): \"\"\"Makes sure that parent folder of this file", "pattern, must_match=True, add_only_keys=None, ignore_errors=False): \"\"\" Updates *dictionary* with items from *iterable* object. This", "Load standard configuration. In this case, parameter redefinition is prohibited. If `parameters` section", "glob from dlbs.exceptions import ConfigurationError class OpenFile(object): \"\"\"Class that can work with gzipped", "This method fails if one parameter is defined in multiple files. This is", "parent folder does not exist, it will be created. See documentation for :py:func:`os.makedirs`", "function removes parameter info leaving only their values :param dict config: A dictionary", "for name in params: val = params[name] if isinstance(val, dict): # This should", "string using separator *separator*. The dictictionary is modified in-place. :param dict dictionary: Dictionary", "dictionary with the same value to match. If policy is 'relaxed', dictionary may", "a file with *file_name* name. :param dict dictionary: Dictionary to serialize. :param any", "the config object represents user-provided configuration. If False, this is a system configuration.", "OpenFile(fname, 'r') as fobj: return json.load(fobj) @staticmethod def write_json(fname, data, check_extension=False): \"\"\" Dumps", "int, float, long)) if is_root: if not both_dicts and not both_lists: _raise_types_mismatch_config_error(key, type(dest[key]),", "automatically based on JSON parse result. \"\"\" if 'parameters' not in config: return", "be matched. In this case, the intersection of keys in dictionary and query", "to a time series of its value. \"\"\" metrics = {} for key", "'', str(frequency) ] process = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: output =", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "file. :param str file_name: Name of a file to serialie dictionary in. \"\"\"", "one of %s\" % \\ (str(is_root), key, val_type, str(schema['types'])) ) # So, the", "elem in dictionary[key]) @staticmethod def filter_by_key_prefix(dictionary, prefix, remove_prefix=True): \"\"\"Creates new dictionary with items", "for launching/shutting down/communicating with external resource manager that monitors system resource consumption. proc_pid", "dictionary. 
        :return: New dictionary with items whose keys start with *prefix*.
        """
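        # Illustration (hypothetical keys): with prefix='exp.' and remove_prefix=True,
        # {'exp.framework': 'tf', 'sys.os': 'linux'} becomes {'framework': 'tf'}.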
                queue.put(output.strip())

    @staticmethod
    def str_to_type(str_val, val_type):
        if val_type == 'str':
            return str_val
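        # The branches below convert to 'int', 'float' and 'bool'; boolean strings
        # may be 'true'/'false', '1'/'0' or 'on'/'off' (case-insensitive).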
        One use case for this method is to populate a dictionary with key-value
        pairs from log files.
        If a parameter in `config` exists in param_info, it means the user has
        provided their specific value for this parameter.
        if is_root:
            schema = {'types': (dict, list), 'dict': ['parameters', 'variables'], 'list': ['extensions']}
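            # At the root level, only the 'parameters'/'variables' dictionaries and
            # the 'extensions' list are expected as section names.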
        :param str launcher: A full path to the resource monitor script.
        For example:
        :param boolean ignore_errors: If True, ignore errors.
        See documentation for :py:func:`os.makedirs` for more details.
        :param dict matches: Dictionary where matches will be stored if a match has been identified.
Types must always match, else exception is", "float(str_val) elif val_type == 'bool': v = str_val.lower() assert v in ('true', 'false',", "not in param_info[name] or 'desc' not in param_info[name]: logging.warn( \"Parameter definition does not", "]) return metrics def remove_pid_file(self): \"\"\"Deletes pif file from disk.\"\"\" try: os.remove(self.pid_file) except", "[2017] Hewlett Packard Enterprise Development LP # # Licensed under the Apache License,", "open(file_name, 'w') as file_obj: json.dump(dictionary, file_obj, indent=4) @staticmethod def add(dictionary, iterable, pattern, must_match=True,", ":param dict dictionary: Dictionary to match. :param dict query: Query to use. :param", "values: return False if matches is not None: matches['%s_0' % (field)] = dictionary[field]", "into pid file. :param int pid: A pid to write. This is a", "exist, it will be created. See documentation for :py:func:`os.makedirs` for more details. :param", "to serialie dictionary in. \"\"\" if file_name is not None: IOUtils.mkdirf(file_name) with open(file_name,", "should fix this. Parameter definition is\" \" %s = %s\", name, param_info[name] )", "subprocess. The thread is reading its output and will put the data into", "keys are in dictionary or keys is None \"\"\" if keys is None:", ") params[name] = val['val'] return clean_config @staticmethod def update(dest, source, is_root=True): \"\"\"Merge **source**", "\" Parameter definition is %s = %s\" % (param_info[name]['type'], name, param_info[name]) ) if", "= copy.deepcopy(config) if 'parameters' in clean_config: params = clean_config['parameters'] for name in params:", "os import copy import json import gzip import re import logging import subprocess", "params[name] if isinstance(val, dict): # This should not generally happen since we deal", "self.__flags = ['rb', 'r'] if mode == 'r' else ['wb', 'w'] def __enter__(self):", "flag, we deal with parameters in config that redefine parameters in existing param_info", "@staticmethod def lists_to_strings(dictionary, separator=' '): \"\"\" Converts every value in dictionary that is", "dict config: A dictionary with configuration section that may contain parameters, variables and", ":param str file_name: Name of a file to serialie dictionary in. \"\"\" if", "2.0 (the \"License\"); # you may not use this file except in compliance", "its output and will put the data into a queue. A main thread", "subprocess import importlib from multiprocessing import Process from multiprocessing import Queue from glob", "value *default_value*. The dictionary is modified in-place. :param dict dictionary: Dictionary to check.", "value. Values must be a json-parseable strings. If *add_only_keys* is not None, only", "\"batch\": [16, 32] }, policy='strict') Match dictionary only if it (a) contains key", "that are directories. :param bool recursively: If True, search in subdirectories. Only used", "field_name field_type = fields_split[1] assert field_type in ('str', 'int', 'float', 'bool'),\\ \"Invalid field", "put the data into a queue. A main thread will then dequeue all", "*directory*. If *recursively* is True, scans subfolders as well. :param str directory: A", "= type(source[key]).__name__ if not isinstance(source[key], schema['types']): raise ConfigurationError( \"Configuration update error - unexpected", "monitor # script. It's a whitespace separated string of numbers. queue.put(output.strip()) @staticmethod def", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "list of config files, configuration object (dictionary) and dictionary of parameters info This", "boolean ignore_erros: If true, ignore errors. :raises ConfigurationError: If *must_match* is True and", "redefines existing parameters in param_info (already loaded params), program terminates. 2. Load user-provided", "standard configuration. In this case, parameter redefinition is prohibited. If `parameters` section in", "= int(fields_split[2]) if len(fields_split) == 3: count = -1 elif fields_split[3] == '':", "different way. If parameter in `config` exists in param_info, it means user has", "method loads configuration files located in 'path'. If `files` is empty, all json", "exist. \"\"\" if not os.path.exists(file_name): return file_name attempt = 0 while True: candidate_file_name", "return them. time:str:1,mem_virt:float:2,mem_res:float:3,mem_shrd:float:4,cpu:float:5,mem:float:6,power:float:7,gpus:float:8- :return: Dictionary that maps metric field to a time series", "check_extension=False): \"\"\" Dumps *dictionary* as a json object to a file with *file_name*", "files which names satisfy *file_name_pattern* pattern in folder *directory*. If *recursively* is True,", "File name. :param boolean check_extension: If True, raises exception if fname does not", "and expected key names. Types must always match, else exception is thrown. if", "check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) IOUtils.mkdirf(fname) with OpenFile(fname, 'w') as fobj: json.dump(data, fobj, indent=4)", "\" defines its default value. Found this definition: %s=%s\" % (name, val) )", "# # Unless required by applicable law or agreed to in writing, software", "is considered to be value. Values must be a json-parseable strings. If *add_only_keys*", "unexpected type of key value: \" \" is_root=%s, key=%s, value type=%s, expected type", "('.json', '.json.gz')) IOUtils.mkdirf(fname) with OpenFile(fname, 'w') as fobj: json.dump(data, fobj, indent=4) class DictUtils(object):", "a regexp epression). If a particular item does not match, and *must_match* is", "cpu mem power gpus_power \"\"\" def __init__(self, launcher, pid_folder, frequency, fields_specs): \"\"\"Initializes resource", "express or implied. # See the License for the specific language governing permissions", "modified. :param dict dictionary: Dictionary to search keys in. :param str prefix: Prefix", "float, long]') dest[key] = copy.deepcopy(source[key]) if both_lists else source[key] class ResourceMonitor(object): \"\"\"The class", "extension (%s). Must be one of %s\" % extensions) @staticmethod def read_json(fname, check_extension=False):", "file to serialie dictionary in. \"\"\" if fname is None: raise ValueError(\"File name", "self.queue) ) self.monitor_process.start() def stop(self): \"\"\"Closes queue and waits for resource monitor to", "folder. This method fails if one parameter is defined in multiple files. This", "load error. Invalid JSON configuration in file %s\", config_file) raise return (config_files, config,", "it contains key 'batch' with value 16 OR 32. :param dict dictionary: Dictionary", "path) if files is not None: config_files = [os.path.join(path, f) for f in", "standard parameters or induced automatically based on JSON parse result. \"\"\" if 'parameters'", "is completed. \"\"\" cmd = [ launcher, pid_file, '', str(frequency) ] process =", "either express or implied. # See the License for the specific language governing", "data into a queue. 
A main thread will then dequeue all data at", "once experiment is completed. \"\"\" cmd = [ launcher, pid_file, '', str(frequency) ]", "If `parameters` section in `config` redefines existing parameters in param_info (already loaded params),", "if query is None: return True assert policy in ['relaxed', 'strict'], \"\" for", "= 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__ if name not", ") # So, the type is expected. Warn if key value is suspicious", "have_module = False return have_module class Modules(object): \"\"\"A class that enumerates non-standard python", "val) ) params[name] = val['val'] return clean_config @staticmethod def update(dest, source, is_root=True): \"\"\"Merge", "separator=' '): \"\"\" Converts every value in dictionary that is list to strings.", "to import module. :param str module_name: A name of a module to try", "'fname'. :param str fname: File name. :param boolean check_extension: If True, raises exception", "extensions): \"\"\"Checks that fname has one of the provided extensions. :param str fname:", "resource monitor to finish.\"\"\" with open(self.pid_file, 'w') as fhandle: fhandle.write('exit') self.queue.close() self.queue.join_thread() self.monitor_process.join()", "if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__ if name not in param_info:", "if not both_dicts and not both_lists: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict, list]') if both_dicts:", "0 else None if add_only_keys is None or key in add_only_keys: dictionary[key] =", "as file_obj: json.dump(dictionary, file_obj, indent=4) @staticmethod def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False):", "duplicate timeseries field (%s)\" % field_name field_type = fields_split[1] assert field_type in ('str',", "file_name_pattern))] return files @staticmethod def gather_files(path_specs, file_name_pattern, recursively=False): \"\"\"Find/get files specified by an", "not is_user_config: # If this is not a user-provided configuration, we disallow parameter", "A resource monitor is launched as a subprocess. The thread is reading its", "files = [f for f in glob(os.path.join(directory, file_name_pattern))] else: files = [f for", "bool recursively: If True, search in subdirectories. Only used for entries in path_specs", "Used by a Modules class to identify if optional python modules are available.", "existing param_info differently. See comments below. We are interested here only in parameters", "care if value is an empty string if value != dictionary[field]: return False", "@staticmethod def update_param_info(param_info, config, is_user_config=False): \"\"\"Update parameter info dictionary based on configurationi in", "!= dictionary[field]: return False elif matches is not None: matches['%s_0' % (field)] =", "raw_fields = fields_specs.split(',') for raw_field in raw_fields: fields_split = raw_field.split(':') assert len(fields_split) in", "value \"tensorflow\". match(dictionary, query = { \"framework\": \"tensorflow\" }, policy='relaxed') Match dictionary if", "candidate_file_name attempt += 1 if attempt >= max_attempts: msg = \"Cannot find non", "if key not in dictionary: return False return True @staticmethod def ensure_exists(dictionary, key,", "exception. :param list add_only_keys: If not None, specifies keys that are added into\\", "must exist in dictionary with the same value to match. 
If policy is", "\"\"\" self.empty_pid_file() self.queue = Queue() self.monitor_process = Process( target=ResourceMonitor.monitor_function, args=(self.launcher, self.pid_file, self.frequency, self.queue)", "start resource monitor in background thread. Due to possible execution of benchmarks in", "the License. # You may obtain a copy of the License at #", "subdictionary containing only keys from 'keys'. :param dict dictionary: Input dictionary. :param list_or_val", "parameter info leaving only their values :param dict config: A dictionary with configuration", "of config files, configuration object (dictionary) and dictionary of parameters info This method", "in dictionary that is list to strings. For every item in *dictionary*, if", "not contain type ('type') and/or description ('desc').\" \" You should fix this. Parameter", "else: if matches is not None: matches['%s_0' % (field)] = dictionary[field] for index,", "contain key 'framework' OR contains\\ key 'framework' with value \"tensorflow\". match(dictionary, query =", "is one of '%s'\", key, schema[val_type]) if key not in dest: # The", "in dest. both_dicts = isinstance(dest[key], dict) and isinstance(source[key], dict) both_lists = isinstance(dest[key], list)", "dictionary: Dictionary to serialize. :param any data: A data to dump into a", "group is considered to be value. Values must be a json-parseable strings. If", "*pattern* (it's a regexp epression). If a particular item does not match, and", "= { \"framework\": [\"tensorflow\", \"caffe2\"], \"batch\": [16, 32] }, policy='strict') Match dictionary only", "if is_root: if not both_dicts and not both_lists: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict, list]')", "of its value. \"\"\" metrics = {} for key in self.fields.keys(): metrics[key] =", "default_value: Default value for key if it does not exist. \"\"\" if key", "is to ensure that we can write to this file. If path to", "such as type and help messages for config_file in config_files: if not os.path.isfile(config_file):", "on configurationi in **config** :param dict param_info: A parameter info dictionary that maps", "%s\" raise ValueError(msg % file_name) @staticmethod def check_file_extensions(fname, extensions): \"\"\"Checks that fname has", "return dictionary return dict((k, dictionary[k]) for k in keys if k in dictionary)", "loaded. :return: A tuple consisting of a list of config files, configuration object", "'): \"\"\" Converts every value in dictionary that is list to strings. For", "not None: matches['%s_0' % (field)] = dictionary[field] for index, group in enumerate(match.groups()): matches['%s_%d'", "\"caffe2\"\\ and (b) it contains key 'batch' with value 16 OR 32. :param", "exist. A typical usage is to ensure that we can write to this", "not None, only those items are added to *dictionary*, that are in this", "not in param_info: param_info[name] = copy.deepcopy(val) # New parameter, set it info object.", "\"\"\" matcher = re.compile(pattern) for line in iterable: match = matcher.match(line) if not", "launcher, pid_file, '', str(frequency) ] process = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True:", "\"Invalid field type (%s). Must be one of ('str', 'int', 'float', 'bool')\" %", "None, only those items are added to *dictionary*, that are in this list.", "element in *iterable* must match\\ *pattern*. If True and not match, raises exception.", "in 'keys' :param dict dictionary: Input dictionary. 
:param list_or_val keys: Keys to find", "path_specs: A list of file names / directories. :param str file_name_pattern: A file", "pattern '%s'. Must match is set to true\" % (line, pattern)) else: continue", "*dictionary* are actually dictionaries. If policy is 'strict', every key in query must", ":param list files: List of file names to load. If None, all files", "= val # Do final validations if 'type' in param_info[name] and param_info[name]['type'] not", "both_dicts: ConfigurationLoader.update(dest[key], source[key], is_root=False) else: dest[key].extend(source[key]) else: if not both_lists and not both_primitive:", "and # limitations under the License. \"\"\"Two classes are define here :py:class:`dlbs.IOUtils` and", "Name of a file to serialie dictionary in. \"\"\" if file_name is not", "is\" \" %s = %s\", name, param_info[name] ) @staticmethod def remove_info(config): \"\"\"In parameter", "keys: Keys to extract :rtype: dict :return: Dictionary that contains key/value pairs for", "[os.path.join(path, f) for f in files] else: config_files = [os.path.join(path, f) for f", "glob(os.path.join(p[0], file_name_pattern))] return files @staticmethod def gather_files(path_specs, file_name_pattern, recursively=False): \"\"\"Find/get files specified by", "parameter. Types of user defined parameters are defined either by user in a", ":param bool is_user_config: If True, the config object represents user-provided configuration. If False,", "\"\"\" if file_name is not None: IOUtils.mkdirf(file_name) with open(file_name, 'w') as file_obj: json.dump(dictionary,", "may contain parameters, variables and extensions. The **config** is a result of parsing", "behaviour for now (this also applies for update_param_info method). \"\"\" if path is", "os.walk(directory) for f in glob(os.path.join(p[0], file_name_pattern))] return files @staticmethod def gather_files(path_specs, file_name_pattern, recursively=False):", "class ResourceMonitor(object): \"\"\"The class is responsible for launching/shutting down/communicating with external resource manager", "% raw_field field_name = fields_split[0] assert field_name not in self.fields,\\ \"Found duplicate timeseries", "def str_to_type(str_val, val_type): if val_type == 'str': return str_val elif val_type == 'int':", "dict) both_lists = isinstance(dest[key], list) and isinstance(source[key], list) both_primitive = type(dest[key]) is type(source[key])", "whitespace separated string of numbers. queue.put(output.strip()) @staticmethod def str_to_type(str_val, val_type): if val_type ==", "dict dest: Merge data to this dictionary. :param dict source: Merge data from", "power gpus_power \"\"\" def __init__(self, launcher, pid_folder, frequency, fields_specs): \"\"\"Initializes resource monitor but", "load error. The 'path' parameter cannot be None.\") if not os.path.isdir(path): raise ValueError(\"Configuration", "of key value: \" \" is_root=%s, key=%s, value type=%s, expected type is one", "is set to true\" % (line, pattern)) else: continue key = match.group(1).strip() try:", "self.empty_pid_file() self.queue = Queue() self.monitor_process = Process( target=ResourceMonitor.monitor_function, args=(self.launcher, self.pid_file, self.frequency, self.queue) )", "Updates *dictionary* with items from *iterable* object. 
This method modifies/updates *dictionary* with items", "configuration object (dictionary) and dictionary of parameters info This method loads configuration files", "class IOUtils(object): \"\"\"Container for input/output helpers\"\"\" @staticmethod def mkdirf(file_name): \"\"\"Makes sure that parent", "IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) with OpenFile(fname, 'r') as fobj: return json.load(fobj) @staticmethod def write_json(fname,", "queue.put(output.strip()) @staticmethod def str_to_type(str_val, val_type): if val_type == 'str': return str_val elif val_type", "type(dest[key]), type(source[key]), '[list, basestring, int, float, long]') dest[key] = copy.deepcopy(source[key]) if both_lists else", "monitor but does not create queue and process. :param str launcher: A full", "its value. \"\"\" metrics = {} for key in self.fields.keys(): metrics[key] = []", "logging.warn(\"Module '%s' cannot be imported, certain system information will not be available\", module_name)", "'r') as fobj: return json.load(fobj) @staticmethod def write_json(fname, data, check_extension=False): \"\"\" Dumps *dictionary*", "field in self.fields: tp = self.fields[field]['type'] idx = self.fields[field]['index'] count = self.fields[field]['count'] if", "None: return None if check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) with OpenFile(fname, 'r') as fobj:", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "path_spec in path_specs: if os.path.isdir(path_spec): files.extend(IOUtils.find_files(path_spec, file_name_pattern, recursively)) elif os.path.isfile(path_spec): files.append(path_spec) return files", "os.remove(self.pid_file) except OSError: pass def empty_pid_file(self): \"\"\"Empty pid file.\"\"\" try: with open(self.pid_file, 'w'):", "self.__fobj = gzip.open(self.__fname, self.__flags[0]) else: self.__fobj = open(self.__fname, self.__flags[1]) return self.__fobj def __exit__(self,", "\"\"\"Empty pid file.\"\"\" try: with open(self.pid_file, 'w'): pass except IOError: pass def write_pid_file(self,", "*recursively* is True, scans subfolders as well. :param str directory: A directory to", "files in. :param str file_name_pattern: A file name pattern to search. For instance,", "variable indicating if import has been succesfull or not. Used by a Modules", "If parameter in `config` exists in param_info, it means user has provided their", "The 'output' is a string printed out by a resource monitor # script.", "waits for resource monitor to finish.\"\"\" with open(self.pid_file, 'w') as fhandle: fhandle.write('exit') self.queue.close()", "sure\\ its parent directory exists. \"\"\" dir_name = os.path.dirname(file_name) if dir_name != ''", "lists, then condition OR applies. For instance: match(dictionary, query = { \"framework\": \"tensorflow\"", "used for entries in path_specs that are directories. :return: List of file names", "= dictionary[field] continue else: match = re.compile(value).match(dictionary[field]) if not match: return False else:", "not both_lists and not both_primitive: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list, basestring, int, float, long]')", "and isinstance(source[key], list) both_primitive = type(dest[key]) is type(source[key]) and isinstance(dest[key], (basestring, int, float,", "directory exists. \"\"\" dir_name = os.path.dirname(file_name) if dir_name != '' and not os.path.isdir(dir_name):", "is to populate a dictionary with key-values from log files. 
:param dict dictionary:", "update error.\" \" Parameter that is defined by a dictionary must contain 'val'", "index, 'count': count } @staticmethod def monitor_function(launcher, pid_file, frequency, queue): \"\"\"A main monitor", "value \"tensorflow\" OR\\ \"caffe2\". match(dictionary, query = { \"framework\": [\"tensorflow\", \"caffe2\"], \"batch\": [16,", "{ \"framework\": \"tensorflow\" }, policy='relaxed') Match dictionary if it does not contain key", "this list. Existing items in *dictionary* are overwritten with new ones if key", "== 'r' else ['wb', 'w'] def __enter__(self): if self.__fname.endswith('.gz'): self.__fobj = gzip.open(self.__fname, self.__flags[0])", "both_lists else source[key] class ResourceMonitor(object): \"\"\"The class is responsible for launching/shutting down/communicating with", "# Configuration with params/vars/extensions param_info = {} # Information on params such as", "data at once once experiment is completed. \"\"\" cmd = [ launcher, pid_file,", "int(str_val) elif val_type == 'float': return float(str_val) elif val_type == 'bool': v =", "If values in query are lists, then condition OR applies. For instance: match(dictionary,", "as fhandle: fhandle.write('exit') self.queue.close() self.queue.join_thread() self.monitor_process.join() self.remove_pid_file() class _ModuleImporter(object): \"\"\"A private class that", "in ['relaxed', 'strict'], \"\" for field, value in query.iteritems(): if field not in", "responsible for launching/shutting down/communicating with external resource manager that monitors system resource consumption.", "IOUtils.mkdirf(file_name) with open(file_name, 'w') as file_obj: json.dump(dictionary, file_obj, indent=4) @staticmethod def add(dictionary, iterable,", "string '%s' with key '%s' (key-value definition: '%s'). Error is %s\" % (value,", "is not None: config_files = [os.path.join(path, f) for f in files] else: config_files", "set to true\" % (line, pattern)) else: continue key = match.group(1).strip() try: value", "this single file. ConfigurationLoader.update(config, ConfigurationLoader.remove_info(config_section)) except ValueError: logging.error(\"Configuration load error. Invalid JSON configuration", "a result of parsing a JSON configuration file. :param bool is_user_config: If True,", "@staticmethod def write_json(fname, data, check_extension=False): \"\"\" Dumps *dictionary* as a json object to", "tp) for index in xrange(idx, idx+count) ]) return metrics def remove_pid_file(self): \"\"\"Deletes pif", "proceed but you may want to fix this.\", json.dumps(val), json.dumps(param_info[name]) ) param_info[name]['val'] =", "for key if it does not exist. \"\"\" if key not in dictionary:", "\"framework\": \"tensorflow\" }, policy='strict') Match dictionary only if it contains key 'framework' with", "\"framework\": [\"tensorflow\", \"caffe2\"], \"batch\": [16, 32] }, policy='strict') Match dictionary only if it", "their members. :param dict dest: Merge data to this dictionary. :param dict source:", "name pattern to search. Only used for entries in path_specs that are directories.", "that contains such fileds as value, help message, type, constraints etc. :param dict", "fhandle: fhandle.write('%d' % pid) def run(self): \"\"\"Create queue and start resource monitor in", "policy='relaxed') Match dictionary if it does not contain key 'framework' OR contains\\ key", "= frequency self.queue = None self.monitor_process = None # Parse fields specs #", "match has been identified. 
:return: True if match or query is None :rtype:", "section where parameter information is defined. There are two scenarios this method is", "and 2). First group is considered as a key, and second group is", "if 'parameters' in clean_config: params = clean_config['parameters'] for name in params: val =", "else: return False if isinstance(value, list) or not isinstance(value, basestring): values = value", "== 3: count = -1 elif fields_split[3] == '': count = 0 else:", "of parameters info This method loads configuration files located in 'path'. If `files`", "as well. :param str directory: A directory to search files in. :param str", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "attempt += 1 if attempt >= max_attempts: msg = \"Cannot find non existing", "isinstance(value, list) else [value] if dictionary[field] not in values: return False if matches", "extension in **path** are loaded. :return: A tuple consisting of a list of", "not self.queue.empty(): data = self.queue.get().strip().split() for field in self.fields: tp = self.fields[field]['type'] idx", "pid file.\"\"\" try: with open(self.pid_file, 'w'): pass except IOError: pass def write_pid_file(self, pid):", "Creates new dictionary with items from *dictionary* which keys names starts with *prefix*.", "with value \"tensorflow\". match(dictionary, query = { \"framework\": \"tensorflow\" }, policy='relaxed') Match dictionary", "{ 'val': val, 'type': val_type, 'desc': \"No description for this parameter provided (it", "dictionary helpers.\"\"\" @staticmethod def subdict(dictionary, keys): \"\"\"Return subdictionary containing only keys from 'keys'.", "Can be something like 0.1 seconds \"\"\" self.launcher = launcher self.pid_file = os.path.join(pid_folder,", "traceback): self.__fobj.close() class IOUtils(object): \"\"\"Container for input/output helpers\"\"\" @staticmethod def mkdirf(file_name): \"\"\"Makes sure", "well. :param str directory: A directory to search files in. :param str file_name_pattern:", "% extensions) @staticmethod def read_json(fname, check_extension=False): \"\"\"Reads JSON object from file 'fname'. :param", "load. If None, all files with JSON extension in **path** are loaded. :return:", "int(fields_split[2]) if len(fields_split) == 3: count = -1 elif fields_split[3] == '': count", "*separator*. The dictictionary is modified in-place. :param dict dictionary: Dictionary to modify. :param", "monitors system resource consumption. proc_pid date virt res shrd cpu mem power gpus_power", "files is not None: config_files = [os.path.join(path, f) for f in files] else:", "added to dictionary\", key, str(value)) except ValueError as err: if not ignore_errors: raise", "from file 'fname'. :param str fname: File name. :param boolean check_extension: If True,", "% (name, str(param_info[name]), val) ) if isinstance(val, dict): # This is a complete", "defines its default value. Found this definition: %s=%s\" % (name, val) ) if", "\"Parameter info remove error.\" \" Parameter that is defined by a dictionary must", "True if **dest** and *source** are root configuration objects. False if these objects", "val_type, str(schema['types'])) ) # So, the type is expected. Warn if key value", "info dictionary based on configurationi in **config** :param dict param_info: A parameter info", "a debugging function and most likely should not be used. \"\"\" with open(self.pid_file,", "in config that redefine parameters in existing param_info differently. See comments below. We", "*iterable* object. 
This method modifies/updates *dictionary* with items from *iterable* object. This object", "\"\"\" with open(self.pid_file, 'w') as fhandle: fhandle.write('%d' % pid) def run(self): \"\"\"Create queue", "*prefix*. \"\"\" return_dictionary = {} for key in dictionary: if key.startswith(prefix): return_key =", "match\\ *pattern*. If True and not match, raises exception. :param list add_only_keys: If", "key not in schema[val_type]: logging.warn(\"The name of a root key is '%s' but", "If None, all files with JSON extension in **path** are loaded. :return: A", "does not match, and *must_match* is True, *ConfigurationError* exception is thrown. Regexp pattern", "1000): \"\"\"Return file name that does not exist. :param str file_name: Input file", "\"\"\" if query is None: return True assert policy in ['relaxed', 'strict'], \"\"", "into a string using separator *separator*. The dictictionary is modified in-place. :param dict", "terminates. 2. Load user-provided configuration. In this case, we still update parameter info", "in keys if k in dictionary) @staticmethod def contains(dictionary, keys): \"\"\"Checkes if dictionary", "The dictictionary is modified in-place. :param dict dictionary: Dictionary to modify. :param str", "v in ('true', 1, 'on') else: assert False, \"Invalid value type %s\" %", "gzip import re import logging import subprocess import importlib from multiprocessing import Process", "= \"Cannot find non existing file from pattern %s\" raise ValueError(msg % file_name)", "dest: Merge data to this dictionary. :param dict source: Merge data from this", "for field in self.fields: tp = self.fields[field]['type'] idx = self.fields[field]['index'] count = self.fields[field]['count']", "= -1 elif fields_split[3] == '': count = 0 else: count = int(fields_split[3])", "script. It's a whitespace separated string of numbers. queue.put(output.strip()) @staticmethod def str_to_type(str_val, val_type):", "with JSON extension in **path** are loaded. :return: A tuple consisting of a", "val['val'] # Existing parameter from user configuration, update its value else: # Just", "imports a particular models and return boolean variable indicating if import has been", "if not recursively: files = [f for f in glob(os.path.join(directory, file_name_pattern))] else: files", "with key-values from log files. :param dict dictionary: Dictionary to update in-place. :param", "special care if value is an empty string if value != dictionary[field]: return", "str file_name_pattern: A file name pattern to search. Only used for entries in", "import importlib from multiprocessing import Process from multiprocessing import Queue from glob import", "not end with one of the extensions. \"\"\" if fname is None: return", "prefix, remove_prefix=True): \"\"\"Creates new dictionary with items which keys start with *prefix*. Creates", "from dlbs.exceptions import ConfigurationError class OpenFile(object): \"\"\"Class that can work with gzipped and", "logging.warn( \" Parameter (%s) entirely redefines existing parameter (%s).\" \" Normally, only value", "value. \"\"\" metrics = {} for key in self.fields.keys(): metrics[key] = [] #", "('desc').\" \" You should fix this. Parameter definition is\" \" %s = %s\",", "identify if optional python modules are available. \"\"\" @staticmethod def try_import(module_name): \"\"\"Tries to", "shrd cpu mem power gpus_power \"\"\" def __init__(self, launcher, pid_folder, frequency, fields_specs): \"\"\"Initializes", "does not exist, it will be created. 
See documentation for :py:func:`os.makedirs` for more", "has been imported, False otherwise. \"\"\" have_module = True try: importlib.import_module(module_name) except ImportError:", "if these objects are members. \"\"\" def _raise_types_mismatch_config_error(key, dest_val_type, src_val_type, valid_types): raise ConfigurationError(", "if key value is suspicious - we can do it only for root.", "are available. \"\"\" @staticmethod def try_import(module_name): \"\"\"Tries to import module. :param str module_name:", "load error. Configuration data cannot be loaded for not a file (%s)\" %", "parameter info dictionary that maps parameter name to its description dictionary that contains", "if not isinstance(source[key], schema['types']): raise ConfigurationError( \"Configuration update error - unexpected type of", "key from source is in dest. both_dicts = isinstance(dest[key], dict) and isinstance(source[key], dict)", "final validations if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float',", "been identified. :return: True if match or query is None :rtype: bool \"\"\"", "'0', 'on', 'off'),\\ \"Invalid boolean value in string (%s)\" % str_val return v", "pid_file, frequency, queue): \"\"\"A main monitor worker function. :param str launcher: A full", "raise ValueError(\"Configuration load error. The 'path' parameter (%s) must point to an existing", "if must_match: raise ConfigurationError(\"Cannot match key-value from '%s' with pattern '%s'. Must match", "count } @staticmethod def monitor_function(launcher, pid_file, frequency, queue): \"\"\"A main monitor worker function.", "except in compliance with the License. # You may obtain a copy of", "f in glob(os.path.join(p[0], file_name_pattern))] return files @staticmethod def gather_files(path_specs, file_name_pattern, recursively=False): \"\"\"Find/get files", "raise ConfigurationError( \"Configuration update error - unexpected type of key value: \" \"", "stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: output = process.stdout.readline() if output == '' and process.poll()", "both_dicts and not both_lists: _raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[dict, list]') if both_dicts: ConfigurationLoader.update(dest[key], source[key],", "with '.json' or '.json.gz'. :rtype: None or JSON object :return: None of fname", "class Modules(object): \"\"\"A class that enumerates non-standard python modules this project depends on.", "main monitor worker function. :param str launcher: A full path to resource monitor", "use case to use this method is to populate a dictionary with key-values", "the type is expected. Warn if key value is suspicious - we can", "@staticmethod def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False): \"\"\" Updates *dictionary* with items", "string (%s)\" % str_val return v in ('true', 1, 'on') else: assert False,", "process. :param str launcher: A full path to resource monitor script. :param str", "must be a json-parseable strings. If *add_only_keys* is not None, only those items", "dictionary that contains such fileds as value, help message, type, constraints etc. 
:param", "= {} # Configuration with params/vars/extensions param_info = {} # Information on params", "(%s)\" % field_name field_type = fields_split[1] assert field_type in ('str', 'int', 'float', 'bool'),\\", "\"Parameter info update error.\" \" Parameter has invalid type = '%s'.\" \" Parameter", "provided.\" \" We will proceed but you may want to fix this.\", json.dumps(val),", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "files @staticmethod def get_non_existing_file(file_name, max_attempts = 1000): \"\"\"Return file name that does not", "definition does not contain type ('type') and/or description ('desc').\" \" You should fix", "it only for root. if is_root and key not in schema[val_type]: logging.warn(\"The name", "the extensions. \"\"\" if fname is None: return assert isinstance(extensions, tuple), \"The 'extensions'", "is 'list', converts this list into a string using separator *separator*. The dictictionary", "exists. The file itself may not exist. A typical usage is to ensure", "cannot be loaded for not a file (%s)\" % config_file) with open(config_file) as", "by an `inputs` parameter. :param list path_specs: A list of file names /", "config_file in config_files: if not os.path.isfile(config_file): raise ValueError(\"Configuration load error. Configuration data cannot", "basestring) or isinstance(val, list) else type(val).__name__ if name not in param_info: param_info[name] =", "primitive types such as numbers and strings not lists or dictionaries. If values", "'framework' with value \"tensorflow\" OR \"caffe2\"\\ and (b) it contains key 'batch' with", "config files, configuration object (dictionary) and dictionary of parameters info This method loads", "with name, value and description. if 'val' not in val: raise ConfigurationError( \"Parameter", "/ directories. :param str file_name_pattern: A file name pattern to search. Only used", "dictionary with key-values from log files. :param dict dictionary: Dictionary to update in-place.", "= [f for p in os.walk(directory) for f in glob(os.path.join(p[0], file_name_pattern))] return files", ":param dict dest: Merge data to this dictionary. :param dict source: Merge data", "module. :param str module_name: A name of a module to try to import,", "param_info = {} # Information on params such as type and help messages", "raw_fields: fields_split = raw_field.split(':') assert len(fields_split) in (3, 4),\\ \"Invalid format of field", "if 'val' not in val: raise ConfigurationError( \"Parameter info remove error.\" \" Parameter", "dest_val_type.__name__, key, src_val_type.__name__) ) # Types and expected key names. 
Types must always", "both_lists = isinstance(dest[key], list) and isinstance(source[key], list) both_primitive = type(dest[key]) is type(source[key]) and", "config_files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.json')] config = {}", "fields_split = raw_field.split(':') assert len(fields_split) in (3, 4),\\ \"Invalid format of field specification", "from disk.\"\"\" try: os.remove(self.pid_file) except OSError: pass def empty_pid_file(self): \"\"\"Empty pid file.\"\"\" try:", "have_module = True try: importlib.import_module(module_name) except ImportError: logging.warn(\"Module '%s' cannot be imported, certain", "process = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: output = process.stdout.readline() if output", "= group continue return True class ConfigurationLoader(object): \"\"\"Loads experimenter configuration from multiple files.\"\"\"", "pid file. :param int pid: A pid to write. This is a debugging", "to match. :param dict matches: Dictionary where matches will be stored if match", "Only used for entries in path_specs that are directories. :param bool recursively: If", "True and not match or if value\\ is not a json-parseable string. \"\"\"", "a json object to a file with *file_name* name. :param dict dictionary: Dictionary", "that does not exist. :param str file_name: Input file name. :rtype: str :return:", "is True, *ConfigurationError* exception is thrown. Regexp pattern must return two groups (1", "with the same value to match. If policy is 'relaxed', dictionary may not", "with value \"tensorflow\" OR \"caffe2\"\\ and (b) it contains key 'batch' with value", "*add_only_keys* is not None, only those items are added to *dictionary*, that are", ":param boolean ignore_erros: If true, ignore errors. :raises ConfigurationError: If *must_match* is True", "for raw_field in raw_fields: fields_split = raw_field.split(':') assert len(fields_split) in (3, 4),\\ \"Invalid", "with OpenFile(fname, 'r') as fobj: return json.load(fobj) @staticmethod def write_json(fname, data, check_extension=False): \"\"\"", "iterable: match = matcher.match(line) if not match: if must_match: raise ConfigurationError(\"Cannot match key-value", "items are added to *dictionary*, that are in this list. Existing items in", "fname: The file name to check. :param tuple extensions: A tuple of extensions", "key not in dictionary: dictionary[key] = copy.deepcopy(default_value) @staticmethod def lists_to_strings(dictionary, separator=' '): \"\"\"", "(dictionary) and dictionary of parameters info This method loads configuration files located in", "with parameters in config that redefine parameters in existing param_info differently. See comments", "a Modules class to identify if optional python modules are available. \"\"\" @staticmethod", "in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'): raise ConfigurationError( \"Parameter", "that the dictionary *dictionary* contains key *key* If key does not exist, it", "Dictionary that maps metric field to a time series of its value. \"\"\"", "monitor in background thread. Due to possible execution of benchmarks in containers, we", "from multiprocessing import Process from multiprocessing import Queue from glob import glob from", "that redefine parameters in existing param_info differently. See comments below. 
We are interested", "\" Parameter that is defined by a dictionary must contain 'val' field that\"", "Raises exception of fname does not end with one of the extensions. \"\"\"", "'.json.gz'. :rtype: None or JSON object :return: None of fname is None else", "gather_files(path_specs, file_name_pattern, recursively=False): \"\"\"Find/get files specified by an `inputs` parameter. :param list path_specs:", "section of a **config** the function removes parameter info leaving only their values", "False return have_module class Modules(object): \"\"\"A class that enumerates non-standard python modules this", "check_extension: IOUtils.check_file_extensions(fname, ('.json', '.json.gz')) with OpenFile(fname, 'r') as fobj: return json.load(fobj) @staticmethod def", "match.group(1).strip() try: value = match.group(2).strip() value = json.loads(value) if len(value) > 0 else", "the data into a queue. A main thread will then dequeue all data", "False else: if matches is not None: matches['%s_0' % (field)] = dictionary[field] for", "json.dumps(val), json.dumps(param_info[name]) ) param_info[name]['val'] = val['val'] # Existing parameter from user configuration, update", "while not self.queue.empty(): data = self.queue.get().strip().split() for field in self.fields: tp = self.fields[field]['type']", "open(self.pid_file, 'w') as fhandle: fhandle.write('%d' % pid) def run(self): \"\"\"Create queue and start", "logging.error(\"Configuration load error. Invalid JSON configuration in file %s\", config_file) raise return (config_files,", "are added into\\ *dictionary*. Others are ignored. :param boolean ignore_erros: If true, ignore", "A pid to write. This is a debugging function and most likely should", "this method is to populate a dictionary with key-values from log files. :param", "in os.walk(directory) for f in glob(os.path.join(p[0], file_name_pattern))] return files @staticmethod def gather_files(path_specs, file_name_pattern,", "not match: return False else: if matches is not None: matches['%s_0' % (field)]", "_raise_types_mismatch_config_error(key, type(dest[key]), type(source[key]), '[list, basestring, int, float, long]') dest[key] = copy.deepcopy(source[key]) if both_lists", "self.queue.get().strip().split() for field in self.fields: tp = self.fields[field]['type'] idx = self.fields[field]['index'] count =", "*prefix*. Creates new dictionary with items from *dictionary* which keys names starts with", "except ImportError: logging.warn(\"Module '%s' cannot be imported, certain system information will not be", "if mode == 'r' else ['wb', 'w'] def __enter__(self): if self.__fname.endswith('.gz'): self.__fobj =", ":param dict matches: Dictionary where matches will be stored if match has been", "for this parameter provided (it was automatically converted from its value).\" } else:", "queue. A main thread will then dequeue all data at once once experiment", "= subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: output = process.stdout.readline() if output ==", "the dictionary *dictionary* contains key *key* If key does not exist, it adds", "True keys = keys if isinstance(keys, list) else [keys] for key in keys:" ]
[ "subtitles self._clean_formatting() #Write file self._write_file(self._path_to_sub, self._sub_list) def Offset(self, _sign, _hour=0, _minute=0, _second=0, _ms=0):", "blank line if it is not before a number i = 0 while", "= re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) #if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): # self._sub_list[i] =", "string): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', string): return string else: #correct hours if re.match(r'^[0-9]{1}\\:', string): string", "character i = 0 while i < len(self._sub_list)-1: self._sub_list[i] += '\\r\\n' i +=", "import re from datetime import timedelta from datetime import datetime from regex import", "\\;', ';', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ',", "continue i += 1 #Clean formatting #Remove blank lines #Test numbers #Formatting of", "re.sub(r'\\r\\n$', '', self._sub_list[i], re.UNICODE) #Extract start time and save in timedelta _time_start =", "= 0 while i < len(self._sub_list): if re.search(combined, self._sub_list[i], re.UNICODE): del self._sub_list[i] continue", "or _ms >= 1000: logger.log(\"TidySub : Milisecond is not correct for offset\", logger.DEBUG)", "#Delete unnecessary lines i = 0 count = 1 while i < len(self._sub_list):", "the time def _clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+',", "empty elif self._sub_list[i+3] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+4]): del self._sub_list[i+3] continue elif", "fileDest.close() logger.log(\"TidySub : INFO: Subtitles file saved\", logger.DEBUG) #Try to detect subtitles language", "Hi in the subtitles file with regex def _clean_hi(self): i = 0 while", "= 1 while i < len(self._sub_list): j = 1 #If the line is", "file containing regex for removal and perform the cleaning and formatting actions\"\"\" def", "_correct = False if not _correct: return False #Save time to offset into", "or _hour > 5: logger.log(\"TidySub : Hour is not correct for offset\", logger.DEBUG)", "del self._sub_list[i+3] continue elif self._sub_list[i+3] == \"\" and re.match('^[0-9]+$', self._sub_list[i+4]): j += 2", "regular expressions self._sub_list[i] = re.sub(r'\\? \\!', '?!', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\? 
\\?", "number i = 0 while i < len(self._sub_list)-1: if self._sub_list[i] == \"\" and", "re.sub(r'^.*\\)', '', self._sub_list[i+1], re.UNICODE) #remove brackets and content self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '', self._sub_list[i],", "(i+1) == len(self._sub_list)-1: del self._sub_list[i+1] continue elif (i+2) == len(self._sub_list)-1: break elif (i+3)", "is not None: logger.log(\"TidySub : INFO: Subtitles file loaded\", logger.DEBUG) return #Load a", "_hour=0, _minute=0, _second=0, _ms=0): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was", "INFO: Removing lyrics\", logger.DEBUG) self._clean_music() #If Hi must be removed if removeHi: logger.log(\"TidySub", "re.UNICODE): self._sub_list[i] = re.sub(r'\\!', ' !', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "not re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i+2] continue #if 3rd line is not empty elif", "File not encoded in UTF-8 neither in latin-1\", logger.DEBUG) return return tempList =", "English\", logger.DEBUG) return \"en\" else: return \"undefined\" #Test Regex for team words def", "add the EOL character i = 0 while i < len(self._sub_list)-1: self._sub_list[i] +=", "time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j += 1 #Exception if last line", "stock if file is loaded self._is_file_loaded = False #Path to the subtitles file", "\\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end", "_language = force_language #If the team strings must be removed if removeTeam: logger.log(\"TidySub", "\")|(\".join(strings.get_guess_english(),True) + \"))\" + \"([ ]|$)\" _count_french = 0 _count_english = 0 i", "self._clean_music() #If Hi must be removed if removeHi: logger.log(\"TidySub : INFO: Removing HI\",", "\\,', ',', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ',", "return #combine words into one regex string _french = \"(^|[ ])\" + \"((\"", "logger.DEBUG) self._clean_punctuation_fr() elif _language == \"en\": logger.log(\"TidySub : INFO: Correcting punctuation (English)\", logger.DEBUG)", "self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '', self._sub_list[i], re.UNICODE) #remove braces split in two lines if", "'; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.*[^ ]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:',", "line if blank if self._sub_list[0] == \"\": del self._sub_list[0] #Delete last line if", "formatting actions\"\"\" def __init__(self, path_to_sub): #Boolean to stock if file is loaded self._is_file_loaded", "_sign == \"-\": _time_start -= _time_offset _time_end -= _time_offset #create the new time", "'- ', self._sub_list[i], re.UNICODE) #Correct not regular expressions self._sub_list[i] = re.sub(r'\\? 
\\!', '?!',", "len(self._sub_list): if re.search(combined, self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i += 1 #Clean Hi", "the file if not force_language: _language = self._detect_language(self._path_to_sub) else: _language = force_language #If", "re.UNICODE): self._sub_list[i] = re.sub(r' \\:', ':', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.* \\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "str(_time_start.minute) + \":\" + str(_time_start.second) + \",\" + str(_time_start.microsecond/1000) + \" --> \"", "'.', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i],", "= string[0:9] + \"00\" + string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string = string[0:9] +", "self._sub_list[i], re.UNICODE) #remove space before closing balise if re.search(r' \\<\\/[^ ]+\\>',self._sub_list[i], re.UNICODE): self._sub_list[i]", "Count the number of occurences of the words for each language while i", "= re.sub(r'\\?(?!\\\")', '. ', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+[^ ]+\\!\",self._sub_list[i], re.UNICODE):", "not \"-\": logger.log(\"TidySub : Bad sign for offset\", logger.DEBUG) _correct = False if", "braces and content self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '', self._sub_list[i], re.UNICODE) #remove braces split in", "exclamation mark if re.match(\"^.+[^ ]+\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!', ' !', self._sub_list[i], re.UNICODE)", "for offset\", logger.DEBUG) _correct = False if (not isinstance(_second, int)) or _second <", "re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string = string[0:6] + \"0\" + string[6:len(string)] #correct ms if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{1}$',", "\"\" and not re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i+2] continue #if 3rd line is not", "#Correct dots if re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE)", "< (len(self._sub_list) - 1) and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and not re.match(r'\\)', self._sub_list[i], re.UNICODE)", "re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i], re.UNICODE) i += 1 #French: Try to correct", "error : number line\", logger.DEBUG) i += j #Re add the EOL character", "== \"\": del self._sub_list[0] #Delete last line if blank if self._sub_list[len(self._sub_list)-1] == \"\":", "re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i+2] continue #if 3rd line is not empty elif self._sub_list[i+3]", "logger.DEBUG) return except: try: fileToRead = codecs.open(path_to_file, \"r\", \"utf-8\") except: logger.log(\"TidySub : File", "self._sub_list[i], re.UNICODE) #Remove line with just a single hyphen self._sub_list[i] = re.sub(r'^\\-$', '',", "while i < len(self._sub_list)-1: if self._sub_list[i] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+1]): del", "re.match('^[0-9]+$', self._sub_list[i]): #First line must always be 1 if i == 0: self._sub_list[i]", "\"\" and self._sub_list[i+4] is not \"\" and self._sub_list[i+5] is not \"\" and re.match('^[0-9]+$',", "i = 0 while i < len(self._sub_list)-1: if self._sub_list[i] == \"\" and not", "0 while i < 
len(self._sub_list)-1: if self._sub_list[i] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+1]):", "path_to_file, toWrite): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG)", "'', self._sub_list[i], re.UNICODE) #delete empty balise self._sub_list[i] = re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i],", "elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+5]):", "names into one regex string combined = \"(\" + \")|(\".join(strings.get_teams()) + \")\" i", ": INFO: Removing HI\", logger.DEBUG) self._clean_hi() #If punctuation must be corrected if correct_punctuation:", "fileToRead = codecs.open(path_to_file, \"r\", \"utf-8\") except: logger.log(\"TidySub : File not encoded in UTF-8", "#remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Correct comma", "del self._sub_list[i] del self._sub_list[i] continue elif self._sub_list[i+2] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+3]):", "+= 1 #Return the language which has the highest count if _count_french >", "del self._sub_list[0] #Delete last line if blank if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1]", "import timedelta from datetime import datetime from regex import strings from sickbeard import", "re.UNICODE) _end = re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE) _end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE) self._sub_list[i] =", "= re.sub(r'\\(.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\)', '', self._sub_list[i+1], re.UNICODE) #remove brackets", "self._sub_list[i], re.UNICODE) #if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): # self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i],", "time range line self._sub_list[i] = str(_time_start.hour) + \":\" + str(_time_start.minute) + \":\" +", "the highest count if _count_french > _count_english: logger.log(\"TidySub : INFO: Guessed language is", "whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #remove space before closing", "self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. 
', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+[^ ]+\\!\",self._sub_list[i],", "dots if re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) if", "= False #Path to the subtitles file if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub =", "+= 1 #French: Try to correct punctuation in the subtitles file with regex", "i = 0 while i < len(self._sub_list): if re.search(u'\\u266a', self._sub_list[i], re.UNICODE): del self._sub_list[i]", "\"-\": _time_start -= _time_offset _time_end -= _time_offset #create the new time range line", "= re.sub(r' \\!', '!', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")',", "codecs.open(path_to_file, \"r\", \"utf-8\") except: logger.log(\"TidySub : File not encoded in UTF-8 neither in", "logger.DEBUG) else: logger.log(\"TidySub : Formatting error : number line\", logger.DEBUG) i += j", "if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', string): return string else: #correct hours if re.match(r'^[0-9]{1}\\:', string): string =", "team names into one regex string combined = \"(\" + \")|(\".join(strings.get_teams()) + \")\"", "hours if re.match(r'^[0-9]{1}\\:', string): string = re.sub(r'^', '0', string, re.UNICODE) #correct minutes if", "== len(self._sub_list)-1: del self._sub_list[len(self._sub_list)-1] if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] break #Check the", "re.sub(r'\\?(?!\\\")', '. ', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. 
', self._sub_list[i], re.UNICODE) #Correct question mark if", "logger.DEBUG) return #Load a text file into a list in utf8 def _load_file(self,", "was loaded\", logger.DEBUG) return if re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(), re.UNICODE): path_to_sub = re.sub(r'\\.[a-z]+$', '', path_to_sub.lower())", "combined = \"(\" + \")|(\".join(strings.get_teams()) + \")\" i = 0 while i <", "'', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Calculate the new time if _sign == \"+\":", "self._sub_list[i], re.UNICODE) #remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE)", "not \"\" and re.match('^[0-9]+$', self._sub_list[i+5]): j += 3 elif self._sub_list[i+3] is not \"\"", "re.sub(r' \\!', '!', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '!", "= re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) i += 1 #Remove music from", "self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]): #remove leading and trailing spaces self._sub_list[i] = re.sub(r'^[", "return except: try: fileToRead = codecs.open(path_to_file, \"r\", \"utf-8\") except: logger.log(\"TidySub : File not", "0 or _ms >= 1000: logger.log(\"TidySub : Milisecond is not correct for offset\",", "import codecs import re from datetime import timedelta from datetime import datetime from", "only corrects .srt files\", logger.DEBUG) return self._team_list = list() self._sub_list = list() #Load", "= re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE) #remove brackets split in two lines if i", "list() #Load the subtitles file self._sub_list = self._load_file(self._path_to_sub, True) if self._sub_list is not", "+ string[9:len(string)] return string #Try to correct the format of the time def", "elif _sign == \"-\": _time_start -= _time_offset _time_end -= _time_offset #create the new", "the words for each language while i < len(self._sub_list): if re.search(_french, self._sub_list[i].lower(), re.UNICODE):", "return self._team_list = list() self._sub_list = list() #Load the subtitles file self._sub_list =", "Milisecond is not correct for offset\", logger.DEBUG) _correct = False if not _correct:", "False if (not isinstance(_minute, int)) or _minute < 0 or _minute >= 60:", "re.UNICODE) i += 1 #English: Try to correct punctuation in the subtitles file", "self._sub_list[i+1]): self._clean_time_range(i+1) j += 1 #Exception if last line if (i+1) == len(self._sub_list)-1:", ": File does not exist or sub is in mkv\", logger.DEBUG) return except:", "#Remove unwanted blank lines self._clean_blank_lines() #Remove BOM character self._sub_list[0] = re.sub(u'\\ufeff', '', self._sub_list[0],", "error : timerange\", logger.DEBUG) else: logger.log(\"TidySub : Formatting error : number line\", logger.DEBUG)", "True #If the EOL must be removed if removeEOL: for i in fileToRead:", "if self._sub_list[i+2] == \"\" and re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i] del self._sub_list[i] del self._sub_list[i]", "string = re.sub(r'^', '0', string, re.UNICODE) #correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string =", "self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! 
', self._sub_list[i], re.UNICODE) #Correct hyphen if re.match(\"^\\-[^ ]\",self._sub_list[i], re.UNICODE):", "not re.match('^[0-9]+$', self._sub_list[i+1]): del self._sub_list[i] continue i += 1 #Delete 1st line if", "if re.match(\"^.*[^ ]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:', ' :', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^", "INFO: Subtitles file loaded\", logger.DEBUG) return #Load a text file into a list", "re.sub(u'\\ufeff', '', self._sub_list[0], re.UNICODE) #Delete unnecessary lines i = 0 count = 1", "toWrite: fileDest.write(i) fileDest.close() logger.log(\"TidySub : INFO: Subtitles file saved\", logger.DEBUG) #Try to detect", "re.UNICODE): self._sub_list[i] = re.sub(r'\\:', ' :', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "file was loaded\", logger.DEBUG) return fileDest = codecs.open(path_to_file, \"w\", \"latin-1\") for i in", "EOL must be removed if removeEOL: for i in fileToRead: tempList.append(i.rstrip('\\n\\r')) else: for", "\"w\", \"latin-1\") for i in toWrite: fileDest.write(i) fileDest.close() logger.log(\"TidySub : INFO: Subtitles file", "in UTF-8 neither in latin-1\", logger.DEBUG) return return tempList = list () self._is_file_loaded", "\\t]*\\:', '', self._sub_list[i], re.UNICODE) #remove leading and trailing spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[", "+ re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Extract end time and", "re.UNICODE): self._sub_list[i] = re.sub(r' \\<\\/', '</', self._sub_list[i], re.UNICODE) i += 1 #English: Try", "and re.match(r'^.*\\]', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]',", "len(self._sub_list): if re.search(_french, self._sub_list[i].lower(), re.UNICODE): _count_french += 1 if re.search(_english, self._sub_list[i].lower(), re.UNICODE): _count_english", "#Clean Hi in the subtitles file with regex def _clean_hi(self): i = 0", "string): string = string[0:3] + \"0\" + string[3:len(string)] #correct seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string):", "a file def _write_file(self, path_to_file, toWrite): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles", "]{2,}', ' ', self._sub_list[i], re.UNICODE) #Remove line with just a single hyphen self._sub_list[i]", "re.UNICODE) #remove brackets split in two lines if i < (len(self._sub_list) - 1)", "del self._sub_list[i+1] continue elif (i+2) == len(self._sub_list)-1: break elif (i+3) == len(self._sub_list)-1: break", "if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string = string[0:9] + \"0\" + string[9:len(string)] return string #Try", "re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Calculate the new time if _sign == \"+\": _time_start +=", "#Remove unwanted blank lines in the subtitles file def _clean_blank_lines(self): #Remove a blank", "= False if (not isinstance(_ms, int)) or _ms < 0 or _ms >=", "= False if (not isinstance(_minute, int)) or _minute < 0 or _minute >=", "- 1) and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and not re.match(r'\\)', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\)',", "Removing teams names\", logger.DEBUG) #Call the function self._clean_team() #If music strings must be", "of the words for each language while i < len(self._sub_list): if re.search(_french, self._sub_list[i].lower(),", "+ \",\" + 
str(_time_start.microsecond/1000) + \" --> \" + \\ str(_time_end.hour) + \":\"", "subtitles def Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language = \"\"): if not self._is_file_loaded:", "and content self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '', self._sub_list[i], re.UNICODE) #remove braces split in two", "in latin-1\", logger.DEBUG) return return tempList = list () self._is_file_loaded = True #If", "file with regex def _clean_punctuation_en(self): i = 0 while i < len(self._sub_list): if", "'.', self._sub_list[i], re.UNICODE) #if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): # self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ',", "offset\", logger.DEBUG) _correct = False if (not isinstance(_ms, int)) or _ms < 0", "re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. ', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+[^", "exist or sub is in mkv\", logger.DEBUG) return except: try: fileToRead = codecs.open(path_to_file,", "del self._sub_list[i] del self._sub_list[i] del self._sub_list[i] continue elif self._sub_list[i+2] == \"\" and not", "0 # Count the number of occurences of the words for each language", "if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] break #Check the second line #Check if", "\" + self._clean_time_format(_end) #Main function to clean subtitles def Clean(self, removeHi=False, removeTeam=False, removeMusic=False,", "return fileDest = codecs.open(path_to_file, \"w\", \"latin-1\") for i in toWrite: fileDest.write(i) fileDest.close() logger.log(\"TidySub", "#correct seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string = string[0:6] + \"0\" + string[6:len(string)] #correct", "File does not exist or sub is in mkv\", logger.DEBUG) return except: try:", "+= j #Re add the EOL character i = 0 while i <", "self._sub_list[i] del self._sub_list[i] del self._sub_list[i] continue elif self._sub_list[i+2] == \"\" and not re.match('^[0-9]+$',", "i = 0 while i < len(self._sub_list): if re.search(combined, self._sub_list[i], re.UNICODE): del self._sub_list[i]", "_count_french > _count_english: logger.log(\"TidySub : INFO: Guessed language is French\", logger.DEBUG) return \"fr\"", "= list () self._is_file_loaded = True #If the EOL must be removed if", "import strings from sickbeard import logger #Definition of the TidySub class class TidySub:", "path_to_sub else: logger.log(\"TidySub : TidySub only corrects .srt files\", logger.DEBUG) return self._team_list =", "'?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. 
', self._sub_list[i],", "= re.sub(r'\\;', ' ;', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;',", "= re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i], re.UNICODE) #remove leading and trailing spaces self._sub_list[i]", "re.match(r'\\}', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE)", "and not re.match('^[0-9]+$', self._sub_list[i+1]): del self._sub_list[i] continue i += 1 #Delete 1st line", "\" + \\ str(_time_end.hour) + \":\" + str(_time_end.minute) + \":\" + str(_time_end.second) +", "#Try to correct the format of the time def _clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}", "False #Path to the subtitles file if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub = path_to_sub", "_end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE) self._sub_list[i] = self._clean_time_format(_start) + \" --> \" +", "the language which has the highest count if _count_french > _count_english: logger.log(\"TidySub :", "if (i+1) == len(self._sub_list)-1: del self._sub_list[i+1] continue elif (i+2) == len(self._sub_list)-1: break elif", "is not \"\" and re.match('^[0-9]+$', self._sub_list[i+5]): j += 3 elif self._sub_list[i+3] is not", "if re.search(r' \\<\\/[^ ]+\\>',self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\<\\/', '</', self._sub_list[i], re.UNICODE) i", "self._sub_list[i+4]): del self._sub_list[i+3] continue elif self._sub_list[i+3] == \"\" and re.match('^[0-9]+$', self._sub_list[i+4]): j +=", "+= '\\r\\n' i += 1 #Remove unwanted blank lines in the subtitles file", "self._sub_list[i], re.UNICODE) #Correct hyphen if re.match(\"^\\-[^ ]\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'^\\-', '- ',", "not exist or sub is in mkv\", logger.DEBUG) return except: try: fileToRead =", "= True #If the EOL must be removed if removeEOL: for i in", "' + re.sub(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Calculate the new time if", "if (not isinstance(_hour, int)) or _hour < 0 or _hour > 5: logger.log(\"TidySub", "re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string = string[0:3] + \"0\" + string[3:len(string)] #correct seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,',", "\"0\" + string[9:len(string)] return string #Try to correct the format of the time", "re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ', self._sub_list[i], re.UNICODE) #Correct", "self._sub_list[i] = re.sub(r' \\:', ':', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "new time range line self._sub_list[i] = str(_time_start.hour) + \":\" + str(_time_start.minute) + \":\"", "if removeEOL: for i in fileToRead: tempList.append(i.rstrip('\\n\\r')) else: for i in fileToRead: tempList.append(i)", "> _count_french: logger.log(\"TidySub : INFO: Guessed language is English\", logger.DEBUG) return \"en\" else:", "before closing balise if re.search(r' \\<\\/[^ ]+\\>',self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\<\\/', '</',", "re.UNICODE): # self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. 
', self._sub_list[i], re.UNICODE) #Correct question mark if", "< (len(self._sub_list) - 1) and re.match(r'^.*\\[', self._sub_list[i], re.UNICODE) and not re.match(r'\\]', self._sub_list[i], re.UNICODE)", "]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:', ' :', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE):", "= timedelta(hours=_hour, minutes=_minute, seconds=_second, microseconds=(_ms*1000)) i = 0 while i < len(self._sub_list): if", "_second >= 60: logger.log(\"TidySub : Second is not correct for offset\", logger.DEBUG) _correct", "' ', self._sub_list[i], re.UNICODE) #Correct comma if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'", "', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.*[^ ]+\\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '", "logger.DEBUG) self._clean_music() #If Hi must be removed if removeHi: logger.log(\"TidySub : INFO: Removing", "self._sub_list[i] = re.sub(r'\\? \\!', '?!', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\? \\? \\?', '???',", "file is loaded self._is_file_loaded = False #Path to the subtitles file if re.match(r'^.+\\.srt$',", "i += 1 #Remove music from line def _clean_music(self): i = 0 while", "self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\(.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\)', '', self._sub_list[i+1],", "= False if not _correct: return False #Save time to offset into a", "str('1') count = 1 else: self._sub_list[i] = str(count) #Exception if last line if", "= re.sub(r'\\.[a-z]+$', '', path_to_sub.lower()) return path_to_sub[len(path_to_sub)-2:len(path_to_sub)] else: return self._guess_language() def _guess_language(self): if not", "\\.', '...', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\.', '..', self._sub_list[i], re.UNICODE) #remove leading", "self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Calculate the new time if _sign == \"+\": _time_start", "\"((\" + \")|(\".join(strings.get_guess_english(),True) + \"))\" + \"([ ]|$)\" _count_french = 0 _count_english =", "re.sub(r'^\\-$', '', self._sub_list[i], re.UNICODE) #delete empty balise self._sub_list[i] = re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '',", "self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\? \\? 
\\?', '???', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\.", "#Remove a blank line if it is not before a number i =", "removed if removeHi: logger.log(\"TidySub : INFO: Removing HI\", logger.DEBUG) self._clean_hi() #If punctuation must", "== \"en\": logger.log(\"TidySub : INFO: Correcting punctuation (English)\", logger.DEBUG) self._clean_punctuation_en() #Clean the formatting", "i = 0 # Count the number of occurences of the words for", "self._sub_list[i] = re.sub(r'\\([^)]*\\)', '', self._sub_list[i], re.UNICODE) #remove parentheses split in two lines if", "if _language == \"fr\": logger.log(\"TidySub : INFO: Correcting punctuation (French)\", logger.DEBUG) self._clean_punctuation_fr() elif", "%H:%M:%S,%f') #Calculate the new time if _sign == \"+\": _time_start += _time_offset _time_end", "re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{1}$', string): string = string[0:9] + \"00\" + string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string", "self._sub_list[i] = re.sub(r' \\,', ',', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "utf-8 -*- import codecs import re from datetime import timedelta from datetime import", "blank lines #Test numbers #Formatting of time def _clean_formatting(self): #Remove unwanted blank lines", "del self._sub_list[i] continue i += 1 #Clean formatting #Remove blank lines #Test numbers", "Regex for team words def _clean_team(self): #combine team names into one regex string", "', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+[^ ]+\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!',", "logger.log(\"TidySub : INFO: Removing teams names\", logger.DEBUG) #Call the function self._clean_team() #If music", "logger.log(\"TidySub : Bad sign for offset\", logger.DEBUG) _correct = False if (not isinstance(_hour,", "= re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.*[^ ]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "i = 0 while i < len(self._sub_list)-1: self._sub_list[i] += '\\r\\n' i += 1", "+ \":\" + str(_time_start.minute) + \":\" + str(_time_start.second) + \",\" + str(_time_start.microsecond/1000) +", "+= 2 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] == \"\" and not", "== \"\" and re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i] del self._sub_list[i] del self._sub_list[i] continue elif", "self._sub_list[i], re.UNICODE) #remove braces split in two lines if i < (len(self._sub_list) -", "self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return if re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(),", "' ', self._sub_list[i], re.UNICODE) #remove space before closing balise if re.search(r' \\<\\/[^ ]+\\>',self._sub_list[i],", "logger.DEBUG) #Call the function self._clean_team() #If music strings must be removed if removeMusic:", "INFO: Removing teams names\", logger.DEBUG) #Call the function self._clean_team() #If music strings must", "if (not isinstance(_second, int)) or _second < 0 or _second >= 60: logger.log(\"TidySub", "1 if i == 0: self._sub_list[i] = str('1') count = 1 else: self._sub_list[i]", "offset\", logger.DEBUG) _correct = False if (not isinstance(_minute, int)) or _minute < 0", "for offset\", logger.DEBUG) _correct = False if not _correct: return False #Save time", "', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+ \\?\",self._sub_list[i], 
re.UNICODE): self._sub_list[i] = re.sub(r'", "[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Extract end time and save in timedelta", "re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ', self._sub_list[i], re.UNICODE) #Correct dots if re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "of speaker in front of the line self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '',", "self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+ \\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\?',", "]|$)\" _english = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_english(),True) + \"))\" + \"([", "count = 1 while i < len(self._sub_list): j = 1 #If the line", "#remove brackets and content self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE) #remove brackets split", "self._sub_list[i+2] continue #if 3rd line is not empty elif self._sub_list[i+3] == \"\" and", "= re.sub(r' \\:', ':', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)',", "Bad sign for offset\", logger.DEBUG) _correct = False if (not isinstance(_hour, int)) or", "= \"\"): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG)", "blank lines self._clean_blank_lines() #Remove BOM character self._sub_list[0] = re.sub(u'\\ufeff', '', self._sub_list[0], re.UNICODE) #Delete", "re.match(\"^.* \\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\;', ';', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i],", "the cleaning and formatting actions\"\"\" def __init__(self, path_to_sub): #Boolean to stock if file", "_ms=0): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return", "re.UNICODE) and re.match(r'^.*\\)', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\(.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] =", "re.UNICODE): self._sub_list[i] = re.sub(r'\\?', ' ?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "\"r\", \"latin-1\") except IOError: logger.log(\"TidySub : File does not exist or sub is", "logger.DEBUG) return \"en\" else: return \"undefined\" #Test Regex for team words def _clean_team(self):", "\"0\" + string[3:len(string)] #correct seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string = string[0:6] + \"0\"", "re.UNICODE) #Delete unnecessary lines i = 0 count = 1 while i <", "logger.log(\"TidySub : Hour is not correct for offset\", logger.DEBUG) _correct = False if", "not \"\" and self._sub_list[i+4] is not \"\" and self._sub_list[i+5] is not \"\" and", "not correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_minute, int)) or", "self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. 
', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+ \\!\",self._sub_list[i],", "= re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE) #remove braces", "in two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\{', self._sub_list[i], re.UNICODE)", "str(_time_start.hour) + \":\" + str(_time_start.minute) + \":\" + str(_time_start.second) + \",\" + str(_time_start.microsecond/1000)", "has the highest count if _count_french > _count_english: logger.log(\"TidySub : INFO: Guessed language", "= 0 while i < len(self._sub_list)-1: if self._sub_list[i] == \"\" and not re.match('^[0-9]+$',", "', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.* \\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\;',", "def Offset(self, _sign, _hour=0, _minute=0, _second=0, _ms=0): if not self._is_file_loaded: logger.log(\"TidySub : No", "a list in utf8 def _load_file(self, path_to_file, removeEOL=False): try: fileToRead = codecs.open(path_to_file, \"r\",", "expressions self._sub_list[i] = re.sub(r'\\? \\!', '?!', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\? \\? \\?',", "and not re.match(r'\\]', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\]', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$', '',", "i += j #Re add the EOL character i = 0 while i", "= re.sub(r'\\. \\.', '..', self._sub_list[i], re.UNICODE) #remove leading and trailing spaces self._sub_list[i] =", "file into a list in utf8 def _load_file(self, path_to_file, removeEOL=False): try: fileToRead =", "self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\}', '', self._sub_list[i+1],", "< len(self._sub_list): if re.search(combined, self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i += 1 #Clean", "a text file into a list in utf8 def _load_file(self, path_to_file, removeEOL=False): try:", "No subtitles file was loaded\", logger.DEBUG) return #combine words into one regex string", "#If the third line is empty and 4th is a number again if", "seconds=_second, microseconds=(_ms*1000)) i = 0 while i < len(self._sub_list): if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): #remove", "+ \\ str(_time_end.hour) + \":\" + str(_time_end.minute) + \":\" + str(_time_end.second) + \",\"", "0 while i < len(self._sub_list): if re.search(u'\\u266a', self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i", "re.UNICODE): del self._sub_list[i] continue i += 1 #Clean formatting #Remove blank lines #Test", "elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is not \"\" and self._sub_list[i+5] is", "1) and re.match(r'^.*\\[', self._sub_list[i], re.UNICODE) and not re.match(r'\\]', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\]', self._sub_list[i+1],", "minutes=_minute, seconds=_second, microseconds=(_ms*1000)) i = 0 while i < len(self._sub_list): if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]):", "to determine the language of the file if not force_language: _language = self._detect_language(self._path_to_sub)", "+ \",\" + str(_time_end.microsecond/1000) #correct the time range line format self._clean_time_range(i) #re add", "logger.DEBUG) return fileDest = codecs.open(path_to_file, \"w\", \"latin-1\") for i in toWrite: 
fileDest.write(i) fileDest.close()", "re.UNICODE) #Correct semi-colon if re.match(\"^.* \\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\;', ';', self._sub_list[i],", "correct the format of the time def _clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$',", "re.UNICODE) _end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE) self._sub_list[i] = self._clean_time_format(_start) + \" --> \"", "= str(count) #Exception if last line if i == len(self._sub_list)-1: del self._sub_list[len(self._sub_list)-1] if", "the file containing regex for removal and perform the cleaning and formatting actions\"\"\"", "str(_time_start.microsecond/1000) + \" --> \" + \\ str(_time_end.hour) + \":\" + str(_time_end.minute) +", "self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE)", "is not empty elif self._sub_list[i+3] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+4]): del self._sub_list[i+3]", "utf8 def _load_file(self, path_to_file, removeEOL=False): try: fileToRead = codecs.open(path_to_file, \"r\", \"latin-1\") except IOError:", "\"\": del self._sub_list[0] #Delete last line if blank if self._sub_list[len(self._sub_list)-1] == \"\": del", "content self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '', self._sub_list[i], re.UNICODE) #remove braces split in two lines", "< 0 or _minute >= 60: logger.log(\"TidySub : Minute is not correct for", "logger.DEBUG) _correct = False if (not isinstance(_second, int)) or _second < 0 or", "tempList.append(i.rstrip('\\n\\r')) else: for i in fileToRead: tempList.append(i) fileToRead.close() return tempList #Write a file", "'', self._sub_list[i], re.UNICODE) i += 1 #French: Try to correct punctuation in the", "re.UNICODE): self._sub_list[i] = re.sub(r'\\;', ' ;', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "line self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i], re.UNICODE) #remove leading and trailing", "]+\\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', ' ;', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE):", "parentheses and content self._sub_list[i] = re.sub(r'\\([^)]*\\)', '', self._sub_list[i], re.UNICODE) #remove parentheses split in", "+ \"0\" + string[6:len(string)] #correct ms if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{1}$', string): string = string[0:9] +", "re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+ \\?\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "'', self._sub_list[i+1], re.UNICODE) #remove brackets and content self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE)", "string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string = string[0:9] + \"0\" + string[9:len(string)] return string", "re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\}', '', self._sub_list[i+1], re.UNICODE)", "for each language while i < len(self._sub_list): if re.search(_french, self._sub_list[i].lower(), re.UNICODE): _count_french +=", "'. 
', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+ \\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "\"\" and re.match('^[0-9]+$', self._sub_list[i+4]): j += 2 elif self._sub_list[i+3] is not \"\" and", "to stock if file is loaded self._is_file_loaded = False #Path to the subtitles", "#Formatting of time def _clean_formatting(self): #Remove unwanted blank lines self._clean_blank_lines() #Remove BOM character", "+ \"([ ]|$)\" _count_french = 0 _count_english = 0 i = 0 #", "False if (not isinstance(_second, int)) or _second < 0 or _second >= 60:", "\")|(\".join(strings.get_guess_french(),True) + \"))\" + \"([ ]|$)\" _english = \"(^|[ ])\" + \"((\" +", "re.match(\"^.*[^ ]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:', ' :', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i],", "the EOL character i = 0 while i < len(self._sub_list)-1: self._sub_list[i] += '\\r\\n'", "= 0 while i < len(self._sub_list): if re.search(u'\\u266a', self._sub_list[i], re.UNICODE): del self._sub_list[i] continue", "\\:', ':', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ',", "_guess_language(self): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return", "3rd line is not empty elif self._sub_list[i+3] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+4]):", "fileToRead = codecs.open(path_to_file, \"r\", \"latin-1\") except IOError: logger.log(\"TidySub : File does not exist", "i in toWrite: fileDest.write(i) fileDest.close() logger.log(\"TidySub : INFO: Subtitles file saved\", logger.DEBUG) #Try", "re.UNICODE) #delete empty balise self._sub_list[i] = re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i], re.UNICODE) i", "re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE) #remove braces and", "re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE) #remove brackets split in two lines if i <", "teams names\", logger.DEBUG) #Call the function self._clean_team() #If music strings must be removed", "if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\,', ',', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\,[^", "= re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.*[^ ]+\\;\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\}', '', self._sub_list[i+1], re.UNICODE) #remove name of", "re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]): #remove leading and trailing spaces self._sub_list[i] =", "self._sub_list[i+6]): j += 4 count += 1 else: logger.log(\"TidySub : Formatting error :", "self._sub_list[i] += '\\r\\n' i += 1 #Write the new SRT file self._write_file(self._path_to_sub, self._sub_list)", "#Extract end time and save in timedelta _time_end = datetime.strptime('01/01/10 ' + re.sub(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s',", "\",\" + str(_time_end.microsecond/1000) #correct the time range line format self._clean_time_range(i) #re add EOL", "logger.log(\"TidySub : INFO: Correcting punctuation (French)\", logger.DEBUG) self._clean_punctuation_fr() elif _language == \"en\": logger.log(\"TidySub", "EOL self._sub_list[i] = re.sub(r'\\r\\n$', '', 
self._sub_list[i], re.UNICODE) #Extract start time and save in", "line format self._clean_time_range(i) #re add EOL self._sub_list[i] += '\\r\\n' i += 1 #Write", "file def _clean_blank_lines(self): #Remove a blank line if it is not before a", "_time_offset = timedelta(hours=_hour, minutes=_minute, seconds=_second, microseconds=(_ms*1000)) i = 0 while i < len(self._sub_list):", "not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]): #remove leading and trailing spaces self._sub_list[i]", "_hour < 0 or _hour > 5: logger.log(\"TidySub : Hour is not correct", "\\!', '!', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! ',", "\\. \\.', '...', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\.', '..', self._sub_list[i], re.UNICODE) #remove", "if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! ', self._sub_list[i], re.UNICODE) #Correct hyphen", "file self._write_file(self._path_to_sub, self._sub_list) def Offset(self, _sign, _hour=0, _minute=0, _second=0, _ms=0): if not self._is_file_loaded:", ">= 1000: logger.log(\"TidySub : Milisecond is not correct for offset\", logger.DEBUG) _correct =", "re.match(r'^.*\\]', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]', '',", "4 count += 1 else: logger.log(\"TidySub : Formatting error : timerange\", logger.DEBUG) else:", "self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] break #Check the second line #Check if it's", "re.UNICODE): self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+", "1 #Delete 1st line if blank if self._sub_list[0] == \"\": del self._sub_list[0] #Delete", "timedelta _time_offset = timedelta(hours=_hour, minutes=_minute, seconds=_second, microseconds=(_ms*1000)) i = 0 while i <", "self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. 
', self._sub_list[i], re.UNICODE)", "not re.match(r'\\]', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\]', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$', '', self._sub_list[i],", "]{2,}', ' ', self._sub_list[i], re.UNICODE) #remove space before closing balise if re.search(r' \\<\\/[^", "_second=0, _ms=0): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG)", "_count_french: logger.log(\"TidySub : INFO: Guessed language is English\", logger.DEBUG) return \"en\" else: return", "file was loaded\", logger.DEBUG) return _correct = True # Check consistency of the", "codecs.open(path_to_file, \"w\", \"latin-1\") for i in toWrite: fileDest.write(i) fileDest.close() logger.log(\"TidySub : INFO: Subtitles", "+= 4 count += 1 else: logger.log(\"TidySub : Formatting error : timerange\", logger.DEBUG)", "while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]): #remove", "str(_time_start.second) + \",\" + str(_time_start.microsecond/1000) + \" --> \" + \\ str(_time_end.hour) +", ": timerange\", logger.DEBUG) else: logger.log(\"TidySub : Formatting error : number line\", logger.DEBUG) i", "#French: Try to correct punctuation in the subtitles file with regex def _clean_punctuation_fr(self):", "re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string = string[0:9] + \"0\" + string[9:len(string)] return string #Try to", "\":\" + str(_time_end.minute) + \":\" + str(_time_end.second) + \",\" + str(_time_end.microsecond/1000) #correct the", ": No subtitles file was loaded\", logger.DEBUG) return if re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(), re.UNICODE): path_to_sub", "in fileToRead: tempList.append(i.rstrip('\\n\\r')) else: for i in fileToRead: tempList.append(i) fileToRead.close() return tempList #Write", "return \"en\" else: return \"undefined\" #Test Regex for team words def _clean_team(self): #combine", "re.sub(r'\\!(?!\\\")', '! ', self._sub_list[i], re.UNICODE) #Correct hyphen if re.match(\"^\\-[^ ]\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "comma if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\,', ',', self._sub_list[i], re.UNICODE) if", "#If Hi must be removed if removeHi: logger.log(\"TidySub : INFO: Removing HI\", logger.DEBUG)", "if self._sub_list is not None: logger.log(\"TidySub : INFO: Subtitles file loaded\", logger.DEBUG) return", "else: self._sub_list[i] = str(count) #Exception if last line if i == len(self._sub_list)-1: del", "_clean_hi(self): i = 0 while i < len(self._sub_list): #remove parentheses and content self._sub_list[i]", "mark if re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\!', '!', self._sub_list[i], re.UNICODE) if", "and not re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i+2] continue #if 3rd line is not empty", "and content self._sub_list[i] = re.sub(r'\\([^)]*\\)', '', self._sub_list[i], re.UNICODE) #remove parentheses split in two", "self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\. 
\\.', '...', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\.", "or _second >= 60: logger.log(\"TidySub : Second is not correct for offset\", logger.DEBUG)", "loaded self._is_file_loaded = False #Path to the subtitles file if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE):", "re.sub(r'\\.[a-z]+$', '', path_to_sub.lower()) return path_to_sub[len(path_to_sub)-2:len(path_to_sub)] else: return self._guess_language() def _guess_language(self): if not self._is_file_loaded:", "time and save in timedelta _time_start = datetime.strptime('01/01/10 ' + re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$',", "from regex import strings from sickbeard import logger #Definition of the TidySub class", "self._sub_list[len(self._sub_list)-1] def _clean_time_format(self, string): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', string): return string else: #correct hours if", "return #Load a text file into a list in utf8 def _load_file(self, path_to_file,", "neither in latin-1\", logger.DEBUG) return return tempList = list () self._is_file_loaded = True", "\"latin-1\") for i in toWrite: fileDest.write(i) fileDest.close() logger.log(\"TidySub : INFO: Subtitles file saved\",", "def _load_file(self, path_to_file, removeEOL=False): try: fileToRead = codecs.open(path_to_file, \"r\", \"latin-1\") except IOError: logger.log(\"TidySub", "or _minute < 0 or _minute >= 60: logger.log(\"TidySub : Minute is not", "del self._sub_list[i] continue i += 1 #Delete 1st line if blank if self._sub_list[0]", "punctuation in the subtitles file with regex def _clean_punctuation_fr(self): i = 0 while", "def Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language = \"\"): if not self._is_file_loaded: logger.log(\"TidySub", "not empty elif self._sub_list[i+3] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+4]): del self._sub_list[i+3] continue", "tempList = list () self._is_file_loaded = True #If the EOL must be removed", "punctuation (English)\", logger.DEBUG) self._clean_punctuation_en() #Clean the formatting before saving the subtitles self._clean_formatting() #Write", "]{2,}', ' ', self._sub_list[i], re.UNICODE) #Correct comma if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "#remove braces split in two lines if i < (len(self._sub_list) - 1) and", "#If music strings must be removed if removeMusic: logger.log(\"TidySub : INFO: Removing lyrics\",", "'', self._sub_list[i], re.UNICODE) #remove parentheses split in two lines if i < (len(self._sub_list)", "re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\)', '', self._sub_list[i+1], re.UNICODE) #remove brackets and content self._sub_list[i] =", "logger.DEBUG) return _correct = True # Check consistency of the parameters if _sign", "re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end = re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE) _end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE)", "< 0 or _second >= 60: logger.log(\"TidySub : Second is not correct for", "self._clean_punctuation_en() #Clean the formatting before saving the subtitles self._clean_formatting() #Write file self._write_file(self._path_to_sub, self._sub_list)", "try: fileToRead = codecs.open(path_to_file, \"r\", \"utf-8\") except: logger.log(\"TidySub : File not encoded in", "', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.* \\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\:',", 
"subtitles file was loaded\", logger.DEBUG) return fileDest = codecs.open(path_to_file, \"w\", \"latin-1\") for i", "line\", logger.DEBUG) i += j #Re add the EOL character i = 0", "\\t]+$', '', self._sub_list[i], re.UNICODE) #remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ',", "self._sub_list[0], re.UNICODE) #Delete unnecessary lines i = 0 count = 1 while i", "continue elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is not \"\" and re.match('^[0-9]+$',", "#Test Regex for team words def _clean_team(self): #combine team names into one regex", "character self._sub_list[0] = re.sub(u'\\ufeff', '', self._sub_list[0], re.UNICODE) #Delete unnecessary lines i = 0", "_ms < 0 or _ms >= 1000: logger.log(\"TidySub : Milisecond is not correct", "re.match('^[0-9]+$', self._sub_list[i+4]): j += 2 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] ==", "self._sub_list[i] += '\\r\\n' i += 1 #Remove unwanted blank lines in the subtitles", "the subtitles self._clean_formatting() #Write file self._write_file(self._path_to_sub, self._sub_list) def Offset(self, _sign, _hour=0, _minute=0, _second=0,", "1 #Remove music from line def _clean_music(self): i = 0 while i <", "codecs import re from datetime import timedelta from datetime import datetime from regex", "\"\" and self._sub_list[i+4] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+5]): del self._sub_list[i+4] continue elif", "in the subtitles file def _clean_blank_lines(self): #Remove a blank line if it is", "Offset(self, _sign, _hour=0, _minute=0, _second=0, _ms=0): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles", "if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. ', self._sub_list[i], re.UNICODE) #Correct exclamation", "re.UNICODE): path_to_sub = re.sub(r'\\.[a-z]+$', '', path_to_sub.lower()) return path_to_sub[len(path_to_sub)-2:len(path_to_sub)] else: return self._guess_language() def _guess_language(self):", "self._sub_list[i+1] continue elif (i+2) == len(self._sub_list)-1: break elif (i+3) == len(self._sub_list)-1: break #If", "logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return #Try to determine the", "logger.log(\"TidySub : File does not exist or sub is in mkv\", logger.DEBUG) return", "and re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\}',", "elif self._sub_list[i+3] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+4]): del self._sub_list[i+3] continue elif self._sub_list[i+3]", "self._sub_list[i] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+1]): del self._sub_list[i] continue i += 1", "i < len(self._sub_list)-1: if self._sub_list[i] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+1]): del self._sub_list[i]", "logger.log(\"TidySub : INFO: Correcting punctuation (English)\", logger.DEBUG) self._clean_punctuation_en() #Clean the formatting before saving", "Formatting error : number line\", logger.DEBUG) i += j #Re add the EOL", "def _clean_blank_lines(self): #Remove a blank line if it is not before a number", "self._sub_list[i+3] continue elif self._sub_list[i+3] == \"\" and re.match('^[0-9]+$', self._sub_list[i+4]): j += 2 elif", "(len(self._sub_list) - 1) and re.match(r'^.*\\{', self._sub_list[i], re.UNICODE) and not re.match(r'\\}', self._sub_list[i], re.UNICODE) and", "must be corrected if correct_punctuation: if _language == \"fr\": logger.log(\"TidySub : INFO: 
Correcting", "re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\,', ',', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\,[^ ]+\",self._sub_list[i],", "= False if (not isinstance(_hour, int)) or _hour < 0 or _hour >", "the line self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i], re.UNICODE) #remove leading and", "with just a single hyphen self._sub_list[i] = re.sub(r'^\\-$', '', self._sub_list[i], re.UNICODE) #delete empty", "string = string[0:3] + \"0\" + string[3:len(string)] #correct seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string", "re.UNICODE) #if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): # self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE)", "isinstance(_minute, int)) or _minute < 0 or _minute >= 60: logger.log(\"TidySub : Minute", "numbers #Formatting of time def _clean_formatting(self): #Remove unwanted blank lines self._clean_blank_lines() #Remove BOM", "actions\"\"\" def __init__(self, path_to_sub): #Boolean to stock if file is loaded self._is_file_loaded =", "i < len(self._sub_list): if re.search(combined, self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i += 1", "music from line def _clean_music(self): i = 0 while i < len(self._sub_list): if", "be 1 if i == 0: self._sub_list[i] = str('1') count = 1 else:", "#Save time to offset into a timedelta _time_offset = timedelta(hours=_hour, minutes=_minute, seconds=_second, microseconds=(_ms*1000))", "def _clean_punctuation_fr(self): i = 0 while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i])", "self._sub_list[i] = re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\}', '', self._sub_list[i+1], re.UNICODE) #remove", "i += 1 #Remove unwanted blank lines in the subtitles file def _clean_blank_lines(self):", "fileToRead.close() return tempList #Write a file def _write_file(self, path_to_file, toWrite): if not self._is_file_loaded:", "if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] def _clean_time_format(self, string): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', string): return", "correct punctuation in the subtitles file with regex def _clean_punctuation_fr(self): i = 0", "= re.sub(r'^\\-$', '', self._sub_list[i], re.UNICODE) #delete empty balise self._sub_list[i] = re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>',", "str(_time_end.minute) + \":\" + str(_time_end.second) + \",\" + str(_time_end.microsecond/1000) #correct the time range", "= re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.* \\:\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "in two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE)", "Hi must be removed if removeHi: logger.log(\"TidySub : INFO: Removing HI\", logger.DEBUG) self._clean_hi()", "start time and save in timedelta _time_start = datetime.strptime('01/01/10 ' + re.sub(r' \\-\\-\\>", ": No subtitles file was loaded\", logger.DEBUG) return fileDest = codecs.open(path_to_file, \"w\", \"latin-1\")", "re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) #if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): # self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)',", "re.UNICODE) #Correct comma if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\,', ',', self._sub_list[i],", "break 
#Check the second line #Check if it's a time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+',", "not \"\" and self._sub_list[i+4] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+5]): j += 3", "\"+\": _time_start += _time_offset _time_end += _time_offset elif _sign == \"-\": _time_start -=", "0 while i < len(self._sub_list)-1: self._sub_list[i] += '\\r\\n' i += 1 #Remove unwanted", "'', self._sub_list[i+1], re.UNICODE) #remove braces and content self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '', self._sub_list[i], re.UNICODE)", "#Delete last line if blank if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] def _clean_time_format(self,", "Subtitles file saved\", logger.DEBUG) #Try to detect subtitles language def _detect_language(self, path_to_sub): if", "re.sub(r' \\,', ',', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")', ',", "number line\", logger.DEBUG) i += j #Re add the EOL character i =", "self._sub_list[i] = re.sub(r'\\? \\? \\?', '???', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\. \\.',", "toWrite): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return", "not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return _correct =", "-*- coding: utf-8 -*- import codecs import re from datetime import timedelta from", "sickbeard import logger #Definition of the TidySub class class TidySub: \"\"\"Load the subtitle,", "= re.sub(r'\\r\\n$', '', self._sub_list[i], re.UNICODE) #Extract start time and save in timedelta _time_start", "'', self._sub_list[i], re.UNICODE) #remove brackets split in two lines if i < (len(self._sub_list)", "file was loaded\", logger.DEBUG) return #Try to determine the language of the file", "\",\" + str(_time_start.microsecond/1000) + \" --> \" + \\ str(_time_end.hour) + \":\" +", "self._sub_list[i] = re.sub(r'\\;', ' ;', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "with regex def _clean_hi(self): i = 0 while i < len(self._sub_list): #remove parentheses", "#Load a text file into a list in utf8 def _load_file(self, path_to_file, removeEOL=False):", "if re.match(r'^[0-9]{1}\\:', string): string = re.sub(r'^', '0', string, re.UNICODE) #correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:',", "the subtitles file if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub = path_to_sub else: logger.log(\"TidySub :", "--> \" + self._clean_time_format(_end) #Main function to clean subtitles def Clean(self, removeHi=False, removeTeam=False,", "and re.match('^[0-9]+$', self._sub_list[i+5]): j += 3 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4]", "60: logger.log(\"TidySub : Second is not correct for offset\", logger.DEBUG) _correct = False", "0 while i < len(self._sub_list): #remove parentheses and content self._sub_list[i] = re.sub(r'\\([^)]*\\)', '',", "in toWrite: fileDest.write(i) fileDest.close() logger.log(\"TidySub : INFO: Subtitles file saved\", logger.DEBUG) #Try to", "subtitles file with regex def _clean_punctuation_fr(self): i = 0 while i < len(self._sub_list):", "re.sub(r'\\?', ' ?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '.", "== 0: self._sub_list[i] = str('1') count = 1 else: self._sub_list[i] = str(count) #Exception", 
"logger.DEBUG) self._clean_hi() #If punctuation must be corrected if correct_punctuation: if _language == \"fr\":", "was loaded\", logger.DEBUG) return fileDest = codecs.open(path_to_file, \"w\", \"latin-1\") for i in toWrite:", "number of occurences of the words for each language while i < len(self._sub_list):", "re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j += 1 #Exception if last line if (i+1) ==", "_time_end = datetime.strptime('01/01/10 ' + re.sub(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Calculate the", "_load_file(self, path_to_file, removeEOL=False): try: fileToRead = codecs.open(path_to_file, \"r\", \"latin-1\") except IOError: logger.log(\"TidySub :", "range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j += 1 #Exception if last line if", "#delete empty balise self._sub_list[i] = re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i], re.UNICODE) i +=", "return string else: #correct hours if re.match(r'^[0-9]{1}\\:', string): string = re.sub(r'^', '0', string,", "string[0:9] + \"00\" + string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string = string[0:9] + \"0\"", "_time_start -= _time_offset _time_end -= _time_offset #create the new time range line self._sub_list[i]", "+ re.sub(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Calculate the new time if _sign", "logger.log(\"TidySub : File not encoded in UTF-8 neither in latin-1\", logger.DEBUG) return return", "correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_ms, int)) or _ms", "= datetime.strptime('01/01/10 ' + re.sub(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Calculate the new", "re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.* \\:\",self._sub_list[i],", "1 while i < len(self._sub_list): j = 1 #If the line is a", "1 #If the line is a number if re.match('^[0-9]+$', self._sub_list[i]): #First line must", "path_to_sub.lower()) return path_to_sub[len(path_to_sub)-2:len(path_to_sub)] else: return self._guess_language() def _guess_language(self): if not self._is_file_loaded: logger.log(\"TidySub :", "re.sub(r'\\? \\!', '?!', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\? \\? 
\\?', '???', self._sub_list[i], re.UNICODE)", "_count_french += 1 if re.search(_english, self._sub_list[i].lower(), re.UNICODE): _count_english += 1 i += 1", "the team strings must be removed if removeTeam: logger.log(\"TidySub : INFO: Removing teams", "_time_end += _time_offset elif _sign == \"-\": _time_start -= _time_offset _time_end -= _time_offset", "self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return fileDest = codecs.open(path_to_file,", "strings from sickbeard import logger #Definition of the TidySub class class TidySub: \"\"\"Load", "= 0 while i < len(self._sub_list)-1: self._sub_list[i] += '\\r\\n' i += 1 #Remove", "loaded\", logger.DEBUG) return fileDest = codecs.open(path_to_file, \"w\", \"latin-1\") for i in toWrite: fileDest.write(i)", "semi-colon if re.match(\"^.* \\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\;', ';', self._sub_list[i], re.UNICODE) if", "str(_time_end.second) + \",\" + str(_time_end.microsecond/1000) #correct the time range line format self._clean_time_range(i) #re", "+= 1 if re.search(_english, self._sub_list[i].lower(), re.UNICODE): _count_english += 1 i += 1 #Return", "to detect subtitles language def _detect_language(self, path_to_sub): if not self._is_file_loaded: logger.log(\"TidySub : No", "the number of occurences of the words for each language while i <", "re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! ', self._sub_list[i], re.UNICODE) #Correct hyphen if", "== \"\" and not re.match('^[0-9]+$', self._sub_list[i+1]): del self._sub_list[i] continue i += 1 #Delete", "self._clean_blank_lines() #Remove BOM character self._sub_list[0] = re.sub(u'\\ufeff', '', self._sub_list[0], re.UNICODE) #Delete unnecessary lines", "= False if (not isinstance(_second, int)) or _second < 0 or _second >=", "in utf8 def _load_file(self, path_to_file, removeEOL=False): try: fileToRead = codecs.open(path_to_file, \"r\", \"latin-1\") except", "_language == \"fr\": logger.log(\"TidySub : INFO: Correcting punctuation (French)\", logger.DEBUG) self._clean_punctuation_fr() elif _language", "re.UNICODE) and not re.match(r'\\]', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\]', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$',", "a single hyphen self._sub_list[i] = re.sub(r'^\\-$', '', self._sub_list[i], re.UNICODE) #delete empty balise self._sub_list[i]", "self._sub_list[i], re.UNICODE) _end = re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE) _end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE) self._sub_list[i]", "if self._sub_list[0] == \"\": del self._sub_list[0] #Delete last line if blank if self._sub_list[len(self._sub_list)-1]", "#Load the subtitles file self._sub_list = self._load_file(self._path_to_sub, True) if self._sub_list is not None:", "elif self._sub_list[i+2] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i+2] continue #if 3rd", "speaker in front of the line self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i],", "re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if", "correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_second, int)) or _second", "self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is not \"\" and self._sub_list[i+5] is not", "into a list in utf8 def _load_file(self, 
path_to_file, removeEOL=False): try: fileToRead = codecs.open(path_to_file,", "if re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) #if re.match(\"^.+\\.[^", "logger #Definition of the TidySub class class TidySub: \"\"\"Load the subtitle, the file", "'. ', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+[^ ]+\\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! ', self._sub_list[i], re.UNICODE) #Correct", "self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return #combine words into", "regex import strings from sickbeard import logger #Definition of the TidySub class class", "and re.match('^[0-9]+$', self._sub_list[i+4]): j += 2 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4]", "+ string[6:len(string)] #correct ms if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{1}$', string): string = string[0:9] + \"00\" +", "strings must be removed if removeTeam: logger.log(\"TidySub : INFO: Removing teams names\", logger.DEBUG)", "loaded\", logger.DEBUG) return #Try to determine the language of the file if not", ": number line\", logger.DEBUG) i += j #Re add the EOL character i", "re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE) #remove braces and content self._sub_list[i] =", "re.match(\"^.* \\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\:', ':', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i],", "self._sub_list[i+1] = re.sub(r'^.*\\)', '', self._sub_list[i+1], re.UNICODE) #remove brackets and content self._sub_list[i] = re.sub(r'\\[[^)]*\\]',", "to the subtitles file if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub = path_to_sub else: logger.log(\"TidySub", "-*- import codecs import re from datetime import timedelta from datetime import datetime", "not regular expressions self._sub_list[i] = re.sub(r'\\? 
\\!', '?!', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\?", "'', self._sub_list[0], re.UNICODE) #Delete unnecessary lines i = 0 count = 1 while", "None: logger.log(\"TidySub : INFO: Subtitles file loaded\", logger.DEBUG) return #Load a text file", "self._detect_language(self._path_to_sub) else: _language = force_language #If the team strings must be removed if", "#correct the time range line format self._clean_time_range(i) #re add EOL self._sub_list[i] += '\\r\\n'", "re.sub(r' \\:', ':', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ':", "#First line must always be 1 if i == 0: self._sub_list[i] = str('1')", "+ \" --> \" + self._clean_time_format(_end) #Main function to clean subtitles def Clean(self,", ": Hour is not correct for offset\", logger.DEBUG) _correct = False if (not", "return return tempList = list () self._is_file_loaded = True #If the EOL must", "self._sub_list[i+2] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i+2] continue #if 3rd line", "self._sub_list[len(self._sub_list)-1] if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] break #Check the second line #Check", "\"\"\"Load the subtitle, the file containing regex for removal and perform the cleaning", "= 0 while i < len(self._sub_list): #remove parentheses and content self._sub_list[i] = re.sub(r'\\([^)]*\\)',", "loaded\", logger.DEBUG) return #combine words into one regex string _french = \"(^|[ ])\"", "re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Extract end time and save in timedelta _time_end = datetime.strptime('01/01/10", "unwanted blank lines self._clean_blank_lines() #Remove BOM character self._sub_list[0] = re.sub(u'\\ufeff', '', self._sub_list[0], re.UNICODE)", "re.UNICODE) #remove braces split in two lines if i < (len(self._sub_list) - 1)", "#remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #remove space", "#If the EOL must be removed if removeEOL: for i in fileToRead: tempList.append(i.rstrip('\\n\\r'))", "= re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Correct comma if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE):", "loaded\", logger.DEBUG) return if re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(), re.UNICODE): path_to_sub = re.sub(r'\\.[a-z]+$', '', path_to_sub.lower()) return", "len(self._sub_list)-1: if self._sub_list[i] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+1]): del self._sub_list[i] continue i", "fileDest.write(i) fileDest.close() logger.log(\"TidySub : INFO: Subtitles file saved\", logger.DEBUG) #Try to detect subtitles", "#Correct semi-colon if re.match(\"^.* \\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\;', ';', self._sub_list[i], re.UNICODE)", "\"\"): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return", "language which has the highest count if _count_french > _count_english: logger.log(\"TidySub : INFO:", "= self._load_file(self._path_to_sub, True) if self._sub_list is not None: logger.log(\"TidySub : INFO: Subtitles file", "= re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)',", "for offset\", logger.DEBUG) _correct = False if (not isinstance(_minute, int)) or _minute <", "#if 3rd line is not empty elif self._sub_list[i+3] == 
\"\" and not re.match('^[0-9]+$',", "not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return #combine words", "re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ', self._sub_list[i], re.UNICODE) #Correct dots if", "spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[ \\t]+$', '', self._sub_list[i], re.UNICODE) #remove multiple whitespaces self._sub_list[i]", "re.sub(r'\\?(?!\\\")', '. ', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+[^ ]+\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE) #Correct question", "+ \"((\" + \")|(\".join(strings.get_guess_french(),True) + \"))\" + \"([ ]|$)\" _english = \"(^|[ ])\"", "not correct for offset\", logger.DEBUG) _correct = False if not _correct: return False", "j += 1 #Exception if last line if (i+1) == len(self._sub_list)-1: del self._sub_list[i+1]", "'%d/%m/%y %H:%M:%S,%f') #Calculate the new time if _sign == \"+\": _time_start += _time_offset", "_french = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_french(),True) + \"))\" + \"([ ]|$)\"", "]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.*[^", "and trailing spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[ \\t]+$', '', self._sub_list[i], re.UNICODE) #remove multiple", "+ \":\" + str(_time_start.second) + \",\" + str(_time_start.microsecond/1000) + \" --> \" +", "', self._sub_list[i], re.UNICODE) #Correct hyphen if re.match(\"^\\-[^ ]\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'^\\-', '-", "= codecs.open(path_to_file, \"w\", \"latin-1\") for i in toWrite: fileDest.write(i) fileDest.close() logger.log(\"TidySub : INFO:", "#remove brackets split in two lines if i < (len(self._sub_list) - 1) and", "\"fr\": logger.log(\"TidySub : INFO: Correcting punctuation (French)\", logger.DEBUG) self._clean_punctuation_fr() elif _language == \"en\":", "\"en\": logger.log(\"TidySub : INFO: Correcting punctuation (English)\", logger.DEBUG) self._clean_punctuation_en() #Clean the formatting before", "and _sign is not \"-\": logger.log(\"TidySub : Bad sign for offset\", logger.DEBUG) _correct", ": No subtitles file was loaded\", logger.DEBUG) return #Try to determine the language", "True) if self._sub_list is not None: logger.log(\"TidySub : INFO: Subtitles file loaded\", logger.DEBUG)", "':', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ', self._sub_list[i],", "self._sub_list[i+1], re.UNICODE) #remove braces and content self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '', self._sub_list[i], re.UNICODE) #remove", "count if _count_french > _count_english: logger.log(\"TidySub : INFO: Guessed language is French\", logger.DEBUG)", "EOL self._sub_list[i] += '\\r\\n' i += 1 #Write the new SRT file self._write_file(self._path_to_sub,", "for offset\", logger.DEBUG) _correct = False if (not isinstance(_hour, int)) or _hour <", "self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "+= 1 #Remove music from line def _clean_music(self): i = 0 while i", "\"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_english(),True) + 
\"))\" + \"([ ]|$)\" _count_french =", "file loaded\", logger.DEBUG) return #Load a text file into a list in utf8", "= re.sub(r' \\;', ';', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;',", "self._sub_list[i], re.UNICODE) #Correct comma if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\,', ',',", "Try to correct punctuation in the subtitles file with regex def _clean_punctuation_en(self): i", "#correct ms if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{1}$', string): string = string[0:9] + \"00\" + string[9:len(string)] if", "fileToRead: tempList.append(i.rstrip('\\n\\r')) else: for i in fileToRead: tempList.append(i) fileToRead.close() return tempList #Write a", "self._sub_list[i+4]): j += 2 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] == \"\"", "_time_offset _time_end -= _time_offset #create the new time range line self._sub_list[i] = str(_time_start.hour)", "]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ', self._sub_list[i], re.UNICODE) #Correct dots if re.match(\"^.+", "logger.DEBUG) return self._team_list = list() self._sub_list = list() #Load the subtitles file self._sub_list", "else: logger.log(\"TidySub : TidySub only corrects .srt files\", logger.DEBUG) return self._team_list = list()", "self._clean_formatting() #Write file self._write_file(self._path_to_sub, self._sub_list) def Offset(self, _sign, _hour=0, _minute=0, _second=0, _ms=0): if", "correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_minute, int)) or _minute", "the function self._clean_team() #If music strings must be removed if removeMusic: logger.log(\"TidySub :", "path_to_sub): #Boolean to stock if file is loaded self._is_file_loaded = False #Path to", "subtitles file if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub = path_to_sub else: logger.log(\"TidySub : TidySub", "logger.log(\"TidySub : INFO: Subtitles file loaded\", logger.DEBUG) return #Load a text file into", "self._sub_list[i] = re.sub(r'\\r\\n$', '', self._sub_list[i], re.UNICODE) #Extract start time and save in timedelta", "IOError: logger.log(\"TidySub : File does not exist or sub is in mkv\", logger.DEBUG)", "is not \"-\": logger.log(\"TidySub : Bad sign for offset\", logger.DEBUG) _correct = False", "offset\", logger.DEBUG) _correct = False if (not isinstance(_hour, int)) or _hour < 0", "re.match(r'^[0-9]{1}\\:', string): string = re.sub(r'^', '0', string, re.UNICODE) #correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string):", "len(self._sub_list)-1: break #If the third line is empty and 4th is a number", "two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and", "== \"\" and re.match('^[0-9]+$', self._sub_list[i+4]): j += 2 elif self._sub_list[i+3] is not \"\"", "re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\!', '!', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i],", "self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return _correct = True", "Minute is not correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_second,", "No subtitles file was loaded\", logger.DEBUG) return #Try to determine the language of", "#Check if it's a time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j += 1", 
"Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language = \"\"): if not self._is_file_loaded: logger.log(\"TidySub :", "self._sub_list[i] = self._clean_time_format(_start) + \" --> \" + self._clean_time_format(_end) #Main function to clean", "sign for offset\", logger.DEBUG) _correct = False if (not isinstance(_hour, int)) or _hour", "or _minute >= 60: logger.log(\"TidySub : Minute is not correct for offset\", logger.DEBUG)", "#Boolean to stock if file is loaded self._is_file_loaded = False #Path to the", "j #Re add the EOL character i = 0 while i < len(self._sub_list)-1:", "lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\{', self._sub_list[i], re.UNICODE) and not", "is not correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_second, int))", "len(self._sub_list): if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): #remove EOL self._sub_list[i] = re.sub(r'\\r\\n$', '', self._sub_list[i], re.UNICODE) #Extract", "the format of the time def _clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]):", "< len(self._sub_list): #remove parentheses and content self._sub_list[i] = re.sub(r'\\([^)]*\\)', '', self._sub_list[i], re.UNICODE) #remove", "1 #English: Try to correct punctuation in the subtitles file with regex def", "def _clean_hi(self): i = 0 while i < len(self._sub_list): #remove parentheses and content", "not _correct: return False #Save time to offset into a timedelta _time_offset =", "#Delete 1st line if blank if self._sub_list[0] == \"\": del self._sub_list[0] #Delete last", "the subtitle, the file containing regex for removal and perform the cleaning and", "last line if i == len(self._sub_list)-1: del self._sub_list[len(self._sub_list)-1] if self._sub_list[len(self._sub_list)-1] == \"\": del", "time def _clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]):", "= True # Check consistency of the parameters if _sign is not \"+\"", "self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\)', '', self._sub_list[i+1], re.UNICODE) #remove brackets and content self._sub_list[i]", "re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct", "i < len(self._sub_list): #remove parentheses and content self._sub_list[i] = re.sub(r'\\([^)]*\\)', '', self._sub_list[i], re.UNICODE)", "', self._sub_list[i], re.UNICODE) #Correct dots if re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\.',", "self._sub_list[i], re.UNICODE) and re.match(r'^.*\\)', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\(.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1]", "= 0 count = 1 while i < len(self._sub_list): j = 1 #If", "braces split in two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\{',", "logger.DEBUG) _correct = False if (not isinstance(_hour, int)) or _hour < 0 or", "import logger #Definition of the TidySub class class TidySub: \"\"\"Load the subtitle, the", "0 or _hour > 5: logger.log(\"TidySub : Hour is not correct for offset\",", "is English\", logger.DEBUG) 
return \"en\" else: return \"undefined\" #Test Regex for team words", "path_to_sub, re.UNICODE): self._path_to_sub = path_to_sub else: logger.log(\"TidySub : TidySub only corrects .srt files\",", "i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'',", "question mark if re.match(\"^.+ \\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\?', '?', self._sub_list[i], re.UNICODE)", "len(self._sub_list)-1: del self._sub_list[len(self._sub_list)-1] if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] break #Check the second", "_time_offset elif _sign == \"-\": _time_start -= _time_offset _time_end -= _time_offset #create the", "re.UNICODE) #Remove line with just a single hyphen self._sub_list[i] = re.sub(r'^\\-$', '', self._sub_list[i],", "#Correct exclamation mark if re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\!', '!', self._sub_list[i],", "60: logger.log(\"TidySub : Minute is not correct for offset\", logger.DEBUG) _correct = False", "if re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(), re.UNICODE): path_to_sub = re.sub(r'\\.[a-z]+$', '', path_to_sub.lower()) return path_to_sub[len(path_to_sub)-2:len(path_to_sub)] else: return", "count += 1 else: logger.log(\"TidySub : Formatting error : timerange\", logger.DEBUG) else: logger.log(\"TidySub", "which has the highest count if _count_french > _count_english: logger.log(\"TidySub : INFO: Guessed", "logger.log(\"TidySub : Formatting error : timerange\", logger.DEBUG) else: logger.log(\"TidySub : Formatting error :", "is not \"\" and self._sub_list[i+4] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+5]): del self._sub_list[i+4]", "re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Extract end time and save", "== \"fr\": logger.log(\"TidySub : INFO: Correcting punctuation (French)\", logger.DEBUG) self._clean_punctuation_fr() elif _language ==", "path_to_sub): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return", "?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. ', self._sub_list[i],", "re.UNICODE): self._sub_list[i] = re.sub(r'\\(.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\)', '', self._sub_list[i+1], re.UNICODE)", "file saved\", logger.DEBUG) #Try to detect subtitles language def _detect_language(self, path_to_sub): if not", "]\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'^\\-', '- ', self._sub_list[i], re.UNICODE) #Correct not regular expressions", "def __init__(self, path_to_sub): #Boolean to stock if file is loaded self._is_file_loaded = False", "if file is loaded self._is_file_loaded = False #Path to the subtitles file if", "regex string combined = \"(\" + \")|(\".join(strings.get_teams()) + \")\" i = 0 while", "i < len(self._sub_list): if re.search(_french, self._sub_list[i].lower(), re.UNICODE): _count_french += 1 if re.search(_english, self._sub_list[i].lower(),", "re.UNICODE) self._sub_list[i] = self._clean_time_format(_start) + \" --> \" + self._clean_time_format(_end) #Main function to", "'. 
', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "self._clean_punctuation_fr() elif _language == \"en\": logger.log(\"TidySub : INFO: Correcting punctuation (English)\", logger.DEBUG) self._clean_punctuation_en()", "self._guess_language() def _guess_language(self): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\",", "self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE) #remove brackets split in two lines if", "if last line if i == len(self._sub_list)-1: del self._sub_list[len(self._sub_list)-1] if self._sub_list[len(self._sub_list)-1] == \"\":", "del self._sub_list[len(self._sub_list)-1] if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] break #Check the second line", "self._clean_time_format(_start) + \" --> \" + self._clean_time_format(_end) #Main function to clean subtitles def", "re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) i += 1 #Remove music from line", ": Formatting error : timerange\", logger.DEBUG) else: logger.log(\"TidySub : Formatting error : number", "re.sub(r'^.*\\}', '', self._sub_list[i+1], re.UNICODE) #remove name of speaker in front of the line", "if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i],", "removeEOL=False): try: fileToRead = codecs.open(path_to_file, \"r\", \"latin-1\") except IOError: logger.log(\"TidySub : File does", "logger.DEBUG) i += j #Re add the EOL character i = 0 while", "True # Check consistency of the parameters if _sign is not \"+\" and", "if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return #Try", "= re.sub(r'^[ \\t]+|[ \\t]+$', '', self._sub_list[i], re.UNICODE) #remove multiple whitespaces self._sub_list[i] = re.sub(r'[", "= re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+[^ ]+\\?\",self._sub_list[i], re.UNICODE):", "= re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i], re.UNICODE) i += 1 #French: Try to", "if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return _correct", "if _sign is not \"+\" and _sign is not \"-\": logger.log(\"TidySub : Bad", "not correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_second, int)) or", "= re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. 
', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+ \\?\",self._sub_list[i], re.UNICODE):", "self._sub_list[i+1] = re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE) #remove braces and content self._sub_list[i] = re.sub(r'\\{[^)]*\\}',", "lyrics\", logger.DEBUG) self._clean_music() #If Hi must be removed if removeHi: logger.log(\"TidySub : INFO:", "if removeHi: logger.log(\"TidySub : INFO: Removing HI\", logger.DEBUG) self._clean_hi() #If punctuation must be", "#Correct exclamation mark if re.match(\"^.+[^ ]+\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!', ' !', self._sub_list[i],", "= datetime.strptime('01/01/10 ' + re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Extract", "strings must be removed if removeMusic: logger.log(\"TidySub : INFO: Removing lyrics\", logger.DEBUG) self._clean_music()", "if not _correct: return False #Save time to offset into a timedelta _time_offset", "be removed if removeTeam: logger.log(\"TidySub : INFO: Removing teams names\", logger.DEBUG) #Call the", "re.UNICODE): self._sub_list[i] = re.sub(r'^\\-', '- ', self._sub_list[i], re.UNICODE) #Correct not regular expressions self._sub_list[i]", "path_to_sub[len(path_to_sub)-2:len(path_to_sub)] else: return self._guess_language() def _guess_language(self): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles", "#!/usr/bin/env python # -*- coding: utf-8 -*- import codecs import re from datetime", "return string #Try to correct the format of the time def _clean_time_range(self, i):", "string): string = string[0:9] + \"00\" + string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string =", "logger.log(\"TidySub : Second is not correct for offset\", logger.DEBUG) _correct = False if", "self._sub_list[i] continue i += 1 #Delete 1st line if blank if self._sub_list[0] ==", "the subtitles file with regex def _clean_punctuation_fr(self): i = 0 while i <", "self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.* \\;\",self._sub_list[i], re.UNICODE):", "' !', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! ',", "Second is not correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_ms,", "< len(self._sub_list): if re.search(_french, self._sub_list[i].lower(), re.UNICODE): _count_french += 1 if re.search(_english, self._sub_list[i].lower(), re.UNICODE):", "re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) #if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): #", "datetime from regex import strings from sickbeard import logger #Definition of the TidySub", "+= 1 #Remove unwanted blank lines in the subtitles file def _clean_blank_lines(self): #Remove", "= re.sub(r'^', '0', string, re.UNICODE) #correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string = string[0:3]", "re.UNICODE): self._sub_list[i] = re.sub(r' \\?', '?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "not force_language: _language = self._detect_language(self._path_to_sub) else: _language = force_language #If the team strings", "]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. 
', self._sub_list[i], re.UNICODE) #Correct exclamation mark if", "_sign, _hour=0, _minute=0, _second=0, _ms=0): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file", "format of the time def _clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return", "#Write file self._write_file(self._path_to_sub, self._sub_list) def Offset(self, _sign, _hour=0, _minute=0, _second=0, _ms=0): if not", "multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Correct comma if", "self._sub_list[i] continue i += 1 #Clean Hi in the subtitles file with regex", ": INFO: Removing lyrics\", logger.DEBUG) self._clean_music() #If Hi must be removed if removeHi:", "+ \"((\" + \")|(\".join(strings.get_guess_english(),True) + \"))\" + \"([ ]|$)\" _count_french = 0 _count_english", "if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon", "self._sub_list[i] continue i += 1 #Clean formatting #Remove blank lines #Test numbers #Formatting", "() self._is_file_loaded = True #If the EOL must be removed if removeEOL: for", "datetime import timedelta from datetime import datetime from regex import strings from sickbeard", "re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE) #remove braces and content self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '', self._sub_list[i],", "= codecs.open(path_to_file, \"r\", \"utf-8\") except: logger.log(\"TidySub : File not encoded in UTF-8 neither", "]|$)\" _count_french = 0 _count_english = 0 i = 0 # Count the", "if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end = re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE)", ": INFO: Subtitles file loaded\", logger.DEBUG) return #Load a text file into a", "False #Save time to offset into a timedelta _time_offset = timedelta(hours=_hour, minutes=_minute, seconds=_second,", "if last line if (i+1) == len(self._sub_list)-1: del self._sub_list[i+1] continue elif (i+2) ==", "re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if", "1) and re.match(r'^.*\\{', self._sub_list[i], re.UNICODE) and not re.match(r'\\}', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\}', self._sub_list[i+1],", "if i < (len(self._sub_list) - 1) and re.match(r'^.*\\{', self._sub_list[i], re.UNICODE) and not re.match(r'\\}',", "if re.match(\"^\\-[^ ]\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'^\\-', '- ', self._sub_list[i], re.UNICODE) #Correct not", "seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string = string[0:6] + \"0\" + string[6:len(string)] #correct ms", "+ \"))\" + \"([ ]|$)\" _english = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_english(),True)", "the language of the file if not force_language: _language = self._detect_language(self._path_to_sub) else: _language", "def _clean_formatting(self): #Remove unwanted blank lines self._clean_blank_lines() #Remove BOM character self._sub_list[0] = re.sub(u'\\ufeff',", "lines in the subtitles file def _clean_blank_lines(self): #Remove a blank line if it", "\"([ ]|$)\" _count_french = 0 _count_english = 0 i = 0 # Count", "subtitles file was loaded\", 
logger.DEBUG) return #combine words into one regex string _french", "= force_language #If the team strings must be removed if removeTeam: logger.log(\"TidySub :", "name of speaker in front of the line self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:',", "= re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\}', '', self._sub_list[i+1], re.UNICODE) #remove name", "leading and trailing spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[ \\t]+$', '', self._sub_list[i], re.UNICODE) #remove", "return tempList #Write a file def _write_file(self, path_to_file, toWrite): if not self._is_file_loaded: logger.log(\"TidySub", "_sign is not \"-\": logger.log(\"TidySub : Bad sign for offset\", logger.DEBUG) _correct =", "= codecs.open(path_to_file, \"r\", \"latin-1\") except IOError: logger.log(\"TidySub : File does not exist or", "re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.* \\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "self._sub_list[i]): #First line must always be 1 if i == 0: self._sub_list[i] =", "if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub = path_to_sub else: logger.log(\"TidySub : TidySub only corrects", "#create the new time range line self._sub_list[i] = str(_time_start.hour) + \":\" + str(_time_start.minute)", "is not \"\" and re.match('^[0-9]+$', self._sub_list[i+6]): j += 4 count += 1 else:", "ms if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{1}$', string): string = string[0:9] + \"00\" + string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$',", "must always be 1 if i == 0: self._sub_list[i] = str('1') count =", "re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #remove space before closing balise if re.search(r'", "\"))\" + \"([ ]|$)\" _count_french = 0 _count_english = 0 i = 0", "self._sub_list[i] = re.sub(r' \\!', '!', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "and self._sub_list[i+4] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+5]): j += 3 elif self._sub_list[i+3]", "= 0 while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$',", "self._sub_list[i], re.UNICODE) i += 1 #English: Try to correct punctuation in the subtitles", "int)) or _minute < 0 or _minute >= 60: logger.log(\"TidySub : Minute is", "= re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE) _end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE) self._sub_list[i] = self._clean_time_format(_start) +", "+ \")|(\".join(strings.get_guess_french(),True) + \"))\" + \"([ ]|$)\" _english = \"(^|[ ])\" + \"((\"", "logger.log(\"TidySub : INFO: Guessed language is French\", logger.DEBUG) return \"fr\" elif _count_english >", "re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub = path_to_sub else: logger.log(\"TidySub : TidySub only corrects .srt", "re.sub(r'\\. 
\\.', '..', self._sub_list[i], re.UNICODE) #remove leading and trailing spaces self._sub_list[i] = re.sub(r'^[", "False if (not isinstance(_ms, int)) or _ms < 0 or _ms >= 1000:", "'0', string, re.UNICODE) #correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string = string[0:3] + \"0\"", "logger.log(\"TidySub : INFO: Removing HI\", logger.DEBUG) self._clean_hi() #If punctuation must be corrected if", "subtitles file with regex def _clean_hi(self): i = 0 while i < len(self._sub_list):", "again if self._sub_list[i+2] == \"\" and re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i] del self._sub_list[i] del", "< (len(self._sub_list) - 1) and re.match(r'^.*\\{', self._sub_list[i], re.UNICODE) and not re.match(r'\\}', self._sub_list[i], re.UNICODE)", "- 1) and re.match(r'^.*\\{', self._sub_list[i], re.UNICODE) and not re.match(r'\\}', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\}',", "if (not isinstance(_ms, int)) or _ms < 0 or _ms >= 1000: logger.log(\"TidySub", "\"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_french(),True) + \"))\" + \"([ ]|$)\" _english =", "re.UNICODE) #Correct question mark if re.match(\"^.+ \\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\?', '?',", "before a number i = 0 while i < len(self._sub_list)-1: if self._sub_list[i] ==", "#If the team strings must be removed if removeTeam: logger.log(\"TidySub : INFO: Removing", "\" --> \" + \\ str(_time_end.hour) + \":\" + str(_time_end.minute) + \":\" +", ": INFO: Correcting punctuation (English)\", logger.DEBUG) self._clean_punctuation_en() #Clean the formatting before saving the", "list() self._sub_list = list() #Load the subtitles file self._sub_list = self._load_file(self._path_to_sub, True) if", "string combined = \"(\" + \")|(\".join(strings.get_teams()) + \")\" i = 0 while i", "return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end = re.sub(r'\\r\\n','', self._sub_list[i],", "HI\", logger.DEBUG) self._clean_hi() #If punctuation must be corrected if correct_punctuation: if _language ==", "(i+3) == len(self._sub_list)-1: break #If the third line is empty and 4th is", "self._sub_list[i], re.UNICODE) _end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE) self._sub_list[i] = self._clean_time_format(_start) + \" -->", "i += 1 #Clean Hi in the subtitles file with regex def _clean_hi(self):", "the subtitles file self._sub_list = self._load_file(self._path_to_sub, True) if self._sub_list is not None: logger.log(\"TidySub", "trailing spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[ \\t]+$', '', self._sub_list[i], re.UNICODE) #remove multiple whitespaces", "function to clean subtitles def Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language = \"\"):", "while i < len(self._sub_list): #remove parentheses and content self._sub_list[i] = re.sub(r'\\([^)]*\\)', '', self._sub_list[i],", "self._sub_list[i] = str(_time_start.hour) + \":\" + str(_time_start.minute) + \":\" + str(_time_start.second) + \",\"", "string, re.UNICODE) #correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string = string[0:3] + \"0\" +", "saved\", logger.DEBUG) #Try to detect subtitles language def _detect_language(self, path_to_sub): if not self._is_file_loaded:", "self._sub_list[i+5]): j += 3 elif self._sub_list[i+3] is not \"\" 
and self._sub_list[i+4] is not", "removeTeam=False, removeMusic=False, correct_punctuation=False, force_language = \"\"): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles", "and re.match('^[0-9]+$', self._sub_list[i+6]): j += 4 count += 1 else: logger.log(\"TidySub : Formatting", "logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return _correct = True #", "i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]): #remove leading", "return if re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(), re.UNICODE): path_to_sub = re.sub(r'\\.[a-z]+$', '', path_to_sub.lower()) return path_to_sub[len(path_to_sub)-2:len(path_to_sub)] else:", "re.UNICODE) #Correct exclamation mark if re.match(\"^.+[^ ]+\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!', ' !',", "del self._sub_list[len(self._sub_list)-1] def _clean_time_format(self, string): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', string): return string else: #correct hours", "string): return string else: #correct hours if re.match(r'^[0-9]{1}\\:', string): string = re.sub(r'^', '0',", "-= _time_offset #create the new time range line self._sub_list[i] = str(_time_start.hour) + \":\"", "\"(\" + \")|(\".join(strings.get_teams()) + \")\" i = 0 while i < len(self._sub_list): if", "self._clean_hi() #If punctuation must be corrected if correct_punctuation: if _language == \"fr\": logger.log(\"TidySub", "third line is empty and 4th is a number again if self._sub_list[i+2] ==", ": No subtitles file was loaded\", logger.DEBUG) return #combine words into one regex", "re.UNICODE) #remove braces and content self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '', self._sub_list[i], re.UNICODE) #remove braces", "brackets and content self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE) #remove brackets split in", "words for each language while i < len(self._sub_list): if re.search(_french, self._sub_list[i].lower(), re.UNICODE): _count_french", "\"\" and not re.match('^[0-9]+$', self._sub_list[i+4]): del self._sub_list[i+3] continue elif self._sub_list[i+3] == \"\" and", "self._sub_list[i] = re.sub(r'\\!', ' !', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "re.search(combined, self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i += 1 #Clean Hi in the", "TidySub class class TidySub: \"\"\"Load the subtitle, the file containing regex for removal", "#remove name of speaker in front of the line self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[", "music strings must be removed if removeMusic: logger.log(\"TidySub : INFO: Removing lyrics\", logger.DEBUG)", "mark if re.match(\"^.+[^ ]+\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!', ' !', self._sub_list[i], re.UNICODE) if", "== len(self._sub_list)-1: break #If the third line is empty and 4th is a", "# -*- coding: utf-8 -*- import codecs import re from datetime import timedelta", "saving the subtitles self._clean_formatting() #Write file self._write_file(self._path_to_sub, self._sub_list) def Offset(self, _sign, _hour=0, _minute=0,", "if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ', self._sub_list[i], re.UNICODE) #Correct dots", "string[0:9] + \"0\" + string[9:len(string)] return string #Try to correct the format of", "= path_to_sub else: logger.log(\"TidySub : TidySub only 
corrects .srt files\", logger.DEBUG) return self._team_list", "#remove parentheses split in two lines if i < (len(self._sub_list) - 1) and", "+= 1 #Delete 1st line if blank if self._sub_list[0] == \"\": del self._sub_list[0]", "split in two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\{', self._sub_list[i],", "re.sub(r'\\(.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\)', '', self._sub_list[i+1], re.UNICODE) #remove brackets and", "front of the line self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i], re.UNICODE) #remove", "parameters if _sign is not \"+\" and _sign is not \"-\": logger.log(\"TidySub :", "def _clean_music(self): i = 0 while i < len(self._sub_list): if re.search(u'\\u266a', self._sub_list[i], re.UNICODE):", "logger.DEBUG) _correct = False if (not isinstance(_ms, int)) or _ms < 0 or", "(not isinstance(_ms, int)) or _ms < 0 or _ms >= 1000: logger.log(\"TidySub :", "(French)\", logger.DEBUG) self._clean_punctuation_fr() elif _language == \"en\": logger.log(\"TidySub : INFO: Correcting punctuation (English)\",", "\"((\" + \")|(\".join(strings.get_guess_french(),True) + \"))\" + \"([ ]|$)\" _english = \"(^|[ ])\" +", "while i < len(self._sub_list): if re.search(u'\\u266a', self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i +=", "< 0 or _hour > 5: logger.log(\"TidySub : Hour is not correct for", "the third line is empty and 4th is a number again if self._sub_list[i+2]", "'', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE) #remove braces and content", "or _hour < 0 or _hour > 5: logger.log(\"TidySub : Hour is not", "question mark if re.match(\"^.+[^ ]+\\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?', ' ?', self._sub_list[i], re.UNICODE)", "regex def _clean_punctuation_fr(self): i = 0 while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+',", "else: for i in fileToRead: tempList.append(i) fileToRead.close() return tempList #Write a file def", "j = 1 #If the line is a number if re.match('^[0-9]+$', self._sub_list[i]): #First", "_minute < 0 or _minute >= 60: logger.log(\"TidySub : Minute is not correct", "self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #remove space before closing balise", "self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\}', '', self._sub_list[i+1], re.UNICODE) #remove name of speaker in", "#Correct comma if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\,', ',', self._sub_list[i], re.UNICODE)", "self._team_list = list() self._sub_list = list() #Load the subtitles file self._sub_list = self._load_file(self._path_to_sub,", "line is a number if re.match('^[0-9]+$', self._sub_list[i]): #First line must always be 1", "not re.match(r'\\)', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\)', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\(.*$', '', self._sub_list[i],", "removeEOL: for i in fileToRead: tempList.append(i.rstrip('\\n\\r')) else: for i in fileToRead: tempList.append(i) fileToRead.close()", "elif (i+3) == len(self._sub_list)-1: break #If the third line is empty and 4th", "re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): #remove EOL self._sub_list[i] = re.sub(r'\\r\\n$', '', self._sub_list[i], re.UNICODE) #Extract start time", "if re.search(_english, self._sub_list[i].lower(), re.UNICODE): 
_count_english += 1 i += 1 #Return the language", "line self._sub_list[i] = str(_time_start.hour) + \":\" + str(_time_start.minute) + \":\" + str(_time_start.second) +", "UTF-8 neither in latin-1\", logger.DEBUG) return return tempList = list () self._is_file_loaded =", "1 else: self._sub_list[i] = str(count) #Exception if last line if i == len(self._sub_list)-1:", "empty balise self._sub_list[i] = re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i], re.UNICODE) i += 1", "self._sub_list[i] = re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i], re.UNICODE) i += 1 #French: Try", "not \"+\" and _sign is not \"-\": logger.log(\"TidySub : Bad sign for offset\",", "correct_punctuation: if _language == \"fr\": logger.log(\"TidySub : INFO: Correcting punctuation (French)\", logger.DEBUG) self._clean_punctuation_fr()", "string else: #correct hours if re.match(r'^[0-9]{1}\\:', string): string = re.sub(r'^', '0', string, re.UNICODE)", "logger.DEBUG) #Try to detect subtitles language def _detect_language(self, path_to_sub): if not self._is_file_loaded: logger.log(\"TidySub", "self._sub_list[i], re.UNICODE) and not re.match(r'\\}', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE): self._sub_list[i] =", "re.UNICODE) #remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) i", "\"\": del self._sub_list[len(self._sub_list)-1] break #Check the second line #Check if it's a time", "re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE)", "self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end = re.sub(r'\\r\\n','',", "= re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE) self._sub_list[i] = self._clean_time_format(_start) + \" --> \" + self._clean_time_format(_end)", "No subtitles file was loaded\", logger.DEBUG) return if re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(), re.UNICODE): path_to_sub =", "coding: utf-8 -*- import codecs import re from datetime import timedelta from datetime", "of the parameters if _sign is not \"+\" and _sign is not \"-\":", "#Check the second line #Check if it's a time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]):", "\":\" + str(_time_end.second) + \",\" + str(_time_end.microsecond/1000) #correct the time range line format", "number again if self._sub_list[i+2] == \"\" and re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i] del self._sub_list[i]", "re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\. \\.', '...', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. 
\\.',", "list () self._is_file_loaded = True #If the EOL must be removed if removeEOL:", "not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return fileDest =", "_start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end = re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE) _end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'',", "self._sub_list[i+3] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+4]): del self._sub_list[i+3] continue elif self._sub_list[i+3] ==", "self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! ', self._sub_list[i], re.UNICODE)", "= re.sub(r' \\,', ',', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")',", "tempList #Write a file def _write_file(self, path_to_file, toWrite): if not self._is_file_loaded: logger.log(\"TidySub :", "_clean_punctuation_en(self): i = 0 while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and", "not before a number i = 0 while i < len(self._sub_list)-1: if self._sub_list[i]", "+= 1 #Clean formatting #Remove blank lines #Test numbers #Formatting of time def", "one regex string combined = \"(\" + \")|(\".join(strings.get_teams()) + \")\" i = 0", "and re.match(r'^.*\\)', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\(.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\)',", "self._sub_list[i] continue elif self._sub_list[i+2] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i+2] continue", "1 #Clean Hi in the subtitles file with regex def _clean_hi(self): i =", "re.sub(r' \\;', ';', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', ';", "INFO: Guessed language is English\", logger.DEBUG) return \"en\" else: return \"undefined\" #Test Regex", "= str('1') count = 1 else: self._sub_list[i] = str(count) #Exception if last line", "+ \" --> \" + \\ str(_time_end.hour) + \":\" + str(_time_end.minute) + \":\"", "re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\}', '',", "self._sub_list[i] = re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE) #remove", "+ string[3:len(string)] #correct seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string = string[0:6] + \"0\" +", "= re.sub(r'\\. \\. \\.', '...', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. 
\\.', '..', self._sub_list[i],", "# Check consistency of the parameters if _sign is not \"+\" and _sign", "re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\.[^ ]+\",self._sub_list[i],", "self._load_file(self._path_to_sub, True) if self._sub_list is not None: logger.log(\"TidySub : INFO: Subtitles file loaded\",", "\\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE):", "does not exist or sub is in mkv\", logger.DEBUG) return except: try: fileToRead", "continue elif self._sub_list[i+3] == \"\" and re.match('^[0-9]+$', self._sub_list[i+4]): j += 2 elif self._sub_list[i+3]", "self._sub_list[i+1], re.UNICODE) #remove brackets and content self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE) #remove", "]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.*[^", "a timedelta _time_offset = timedelta(hours=_hour, minutes=_minute, seconds=_second, microseconds=(_ms*1000)) i = 0 while i", "and self._sub_list[i+4] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+5]): del self._sub_list[i+4] continue elif self._sub_list[i+3]", "subtitles file was loaded\", logger.DEBUG) return _correct = True # Check consistency of", "from datetime import datetime from regex import strings from sickbeard import logger #Definition", "+= 1 #English: Try to correct punctuation in the subtitles file with regex", "= re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.* \\;\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "= list() self._sub_list = list() #Load the subtitles file self._sub_list = self._load_file(self._path_to_sub, True)", "#Exception if last line if (i+1) == len(self._sub_list)-1: del self._sub_list[i+1] continue elif (i+2)", "path_to_sub = re.sub(r'\\.[a-z]+$', '', path_to_sub.lower()) return path_to_sub[len(path_to_sub)-2:len(path_to_sub)] else: return self._guess_language() def _guess_language(self): if", "= re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE) #remove braces and content self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '',", "', self._sub_list[i], re.UNICODE) i += 1 #Remove music from line def _clean_music(self): i", "'', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\)', '', self._sub_list[i+1], re.UNICODE) #remove brackets and content", "\"en\" else: return \"undefined\" #Test Regex for team words def _clean_team(self): #combine team", "\"fr\" elif _count_english > _count_french: logger.log(\"TidySub : INFO: Guessed language is English\", logger.DEBUG)", "for i in toWrite: fileDest.write(i) fileDest.close() logger.log(\"TidySub : INFO: Subtitles file saved\", logger.DEBUG)", "= re.sub(r'\\!', ' !', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")',", "tempList.append(i) fileToRead.close() return tempList #Write a file def _write_file(self, path_to_file, toWrite): if not", "#Correct semi-colon if re.match(\"^.*[^ ]+\\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', ' ;', self._sub_list[i], re.UNICODE)", "TidySub only corrects .srt files\", logger.DEBUG) return self._team_list = list() self._sub_list = list()", "#Remove blank lines #Test numbers #Formatting of time def 
_clean_formatting(self): #Remove unwanted blank", "range line self._sub_list[i] = str(_time_start.hour) + \":\" + str(_time_start.minute) + \":\" + str(_time_start.second)", "and self._sub_list[i+4] is not \"\" and self._sub_list[i+5] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+6]):", "re from datetime import timedelta from datetime import datetime from regex import strings", "the TidySub class class TidySub: \"\"\"Load the subtitle, the file containing regex for", "except IOError: logger.log(\"TidySub : File does not exist or sub is in mkv\",", "language of the file if not force_language: _language = self._detect_language(self._path_to_sub) else: _language =", "return \"fr\" elif _count_english > _count_french: logger.log(\"TidySub : INFO: Guessed language is English\",", "line #Check if it's a time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j +=", "timedelta _time_end = datetime.strptime('01/01/10 ' + re.sub(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Calculate", "self._sub_list = self._load_file(self._path_to_sub, True) if self._sub_list is not None: logger.log(\"TidySub : INFO: Subtitles", "for team words def _clean_team(self): #combine team names into one regex string combined", "else: _language = force_language #If the team strings must be removed if removeTeam:", "self._sub_list[i+4] is not \"\" and self._sub_list[i+5] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+6]): j", "highest count if _count_french > _count_english: logger.log(\"TidySub : INFO: Guessed language is French\",", "if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j += 1 #Exception if last line if (i+1)", "\"\" and self._sub_list[i+4] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+5]): j += 3 elif", "def _clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start", "re.match(\"^.+[^ ]+\\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?', ' ?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i],", "\"\" and re.match('^[0-9]+$', self._sub_list[i+6]): j += 4 count += 1 else: logger.log(\"TidySub :", "self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i], re.UNICODE) #remove leading and trailing spaces", "if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]): #remove leading and trailing spaces", "of the line self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i], re.UNICODE) #remove leading", "must be removed if removeMusic: logger.log(\"TidySub : INFO: Removing lyrics\", logger.DEBUG) self._clean_music() #If", "to correct the format of the time def _clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\>", "if it's a time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j += 1 #Exception", "balise if re.search(r' \\<\\/[^ ]+\\>',self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\<\\/', '</', self._sub_list[i], re.UNICODE)", ": INFO: Guessed language is English\", logger.DEBUG) return \"en\" else: return \"undefined\" 
#Test", "with regex def _clean_punctuation_fr(self): i = 0 while i < len(self._sub_list): if not", "offset\", logger.DEBUG) _correct = False if (not isinstance(_second, int)) or _second < 0", "file with regex def _clean_punctuation_fr(self): i = 0 while i < len(self._sub_list): if", "re.UNICODE) if re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct", "+ str(_time_start.minute) + \":\" + str(_time_start.second) + \",\" + str(_time_start.microsecond/1000) + \" -->", "#Main function to clean subtitles def Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language =", "#Correct question mark if re.match(\"^.+[^ ]+\\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?', ' ?', self._sub_list[i],", "\":\" + str(_time_start.second) + \",\" + str(_time_start.microsecond/1000) + \" --> \" + \\", "removed if removeMusic: logger.log(\"TidySub : INFO: Removing lyrics\", logger.DEBUG) self._clean_music() #If Hi must", "and not re.match(r'\\}', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$', '',", "self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end = re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE) _end =", "self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\.', '..', self._sub_list[i], re.UNICODE) #remove leading and trailing", "mkv\", logger.DEBUG) return except: try: fileToRead = codecs.open(path_to_file, \"r\", \"utf-8\") except: logger.log(\"TidySub :", "elif self._sub_list[i+3] == \"\" and re.match('^[0-9]+$', self._sub_list[i+4]): j += 2 elif self._sub_list[i+3] is", "subtitles file def _clean_blank_lines(self): #Remove a blank line if it is not before", "re.sub(r'^', '0', string, re.UNICODE) #correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string = string[0:3] +", "re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Correct comma if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "always be 1 if i == 0: self._sub_list[i] = str('1') count = 1", "self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+[^ ]+\\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?', '", "consistency of the parameters if _sign is not \"+\" and _sign is not", "len(self._sub_list)-1: self._sub_list[i] += '\\r\\n' i += 1 #Remove unwanted blank lines in the", "_count_english = 0 i = 0 # Count the number of occurences of", "not re.match('^[0-9]+$', self._sub_list[i+5]): del self._sub_list[i+4] continue elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4]", "+ \":\" + str(_time_end.second) + \",\" + str(_time_end.microsecond/1000) #correct the time range line", "])\" + \"((\" + \")|(\".join(strings.get_guess_french(),True) + \"))\" + \"([ ]|$)\" _english = \"(^|[", "blank lines in the subtitles file def _clean_blank_lines(self): #Remove a blank line if", "#remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) i +=", "\"+\" and _sign is not \"-\": logger.log(\"TidySub : Bad sign for offset\", logger.DEBUG)", "1 else: logger.log(\"TidySub : Formatting error : timerange\", logger.DEBUG) else: logger.log(\"TidySub : Formatting", "= \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_french(),True) + \"))\" + \"([ ]|$)\" _english", "list in utf8 def _load_file(self, 
    #Try to detect subtitles language
    def _detect_language(self, path_to_sub):
        if not self._is_file_loaded:
            logger.log("TidySub : No subtitles file was loaded", logger.DEBUG)
            return
        #Language code embedded in the file name, e.g. show.fr.srt
        if re.match(r'^.+\.[a-z]{2}\.srt$', path_to_sub.lower(), re.UNICODE):
            path_to_sub = re.sub(r'\.[a-z]+$', '', path_to_sub.lower())
            return path_to_sub[len(path_to_sub)-2:len(path_to_sub)]
        else:
            return self._guess_language()

    def _guess_language(self):
        if not self._is_file_loaded:
            logger.log("TidySub : No subtitles file was loaded", logger.DEBUG)
            return
        #combine words into one regex string
        _french = "(^|[ ])" + "((" + ")|(".join(strings.get_guess_french()) + "))" + "([ ]|$)"
        _english = "(^|[ ])" + "((" + ")|(".join(strings.get_guess_english()) + "))" + "([ ]|$)"
        _count_french = 0
        _count_english = 0
        i = 0
        # Count the number of occurences of the words for each language
        while i < len(self._sub_list):
            if re.search(_french, self._sub_list[i].lower(), re.UNICODE):
                _count_french += 1
            if re.search(_english, self._sub_list[i].lower(), re.UNICODE):
                _count_english += 1
            i += 1
        #Return the language which has the highest count
        if _count_french > _count_english:
            logger.log("TidySub : INFO: Guessed language is French", logger.DEBUG)
            return "fr"
        elif _count_english > _count_french:
            logger.log("TidySub : INFO: Guessed language is English", logger.DEBUG)
            return "en"
        else:
            return "undefined"
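    # Illustration of the voting heuristic above, with hypothetical word lists
    # standing in for strings.get_guess_french() / strings.get_guess_english():
    #
    #   >>> import re
    #   >>> french = "(^|[ ])((" + ")|(".join(["le", "la", "est"]) + "))([ ]|$)"
    #   >>> english = "(^|[ ])((" + ")|(".join(["the", "is", "you"]) + "))([ ]|$)"
    #   >>> bool(re.search(english, "the cat is here"))
    #   True
    #   >>> bool(re.search(french, "the cat is here"))
    #   False
    #
    # Each line can vote once for each language; the larger total wins.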
    #Test Regex for team words
    def _clean_team(self):
        #combine team names into one regex string
        combined = "(" + ")|(".join(strings.get_teams()) + ")"
        i = 0
        while i < len(self._sub_list):
            if re.search(combined, self._sub_list[i], re.UNICODE):
                del self._sub_list[i]
                continue
            i += 1
"re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Correct comma if re.match(\"^.+", "', self._sub_list[i], re.UNICODE) #remove space before closing balise if re.search(r' \\<\\/[^ ]+\\>',self._sub_list[i], re.UNICODE):", "+= 1 else: logger.log(\"TidySub : Formatting error : timerange\", logger.DEBUG) else: logger.log(\"TidySub :", ": Minute is not correct for offset\", logger.DEBUG) _correct = False if (not", "i += 1 #Return the language which has the highest count if _count_french", "colon if re.match(\"^.*[^ ]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:', ' :', self._sub_list[i], re.UNICODE) if", "self._sub_list[i] = re.sub(r'^[ \\t]+|[ \\t]+$', '', self._sub_list[i], re.UNICODE) #remove multiple whitespaces self._sub_list[i] =", "\\<\\/[^ ]+\\>',self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\<\\/', '</', self._sub_list[i], re.UNICODE) i += 1", "punctuation (French)\", logger.DEBUG) self._clean_punctuation_fr() elif _language == \"en\": logger.log(\"TidySub : INFO: Correcting punctuation", "re.UNICODE) #remove name of speaker in front of the line self._sub_list[i] = re.sub(r'^[", "str(_time_end.microsecond/1000) #correct the time range line format self._clean_time_range(i) #re add EOL self._sub_list[i] +=", "lines self._clean_blank_lines() #Remove BOM character self._sub_list[0] = re.sub(u'\\ufeff', '', self._sub_list[0], re.UNICODE) #Delete unnecessary", "whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Remove line with just", "_time_offset _time_end += _time_offset elif _sign == \"-\": _time_start -= _time_offset _time_end -=", "self._sub_list[i+4] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+5]): del self._sub_list[i+4] continue elif self._sub_list[i+3] is", "self._sub_list[i], re.UNICODE) #Correct not regular expressions self._sub_list[i] = re.sub(r'\\? 
\\!', '?!', self._sub_list[i], re.UNICODE)", "words into one regex string _french = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_french(),True)", "- 1) and re.match(r'^.*\\[', self._sub_list[i], re.UNICODE) and not re.match(r'\\]', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\]',", "1 #Remove unwanted blank lines in the subtitles file def _clean_blank_lines(self): #Remove a", "re.match('^[0-9]+$', self._sub_list[i+1]): del self._sub_list[i] continue i += 1 #Delete 1st line if blank", "subtitles language def _detect_language(self, path_to_sub): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file", "self._sub_list[i].lower(), re.UNICODE): _count_french += 1 if re.search(_english, self._sub_list[i].lower(), re.UNICODE): _count_english += 1 i", "re.match('^[0-9]+$', self._sub_list[i+4]): del self._sub_list[i+3] continue elif self._sub_list[i+3] == \"\" and re.match('^[0-9]+$', self._sub_list[i+4]): j", "\\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\?', '?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE):", "string _french = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_french(),True) + \"))\" + \"([", "re.search(r' \\<\\/[^ ]+\\>',self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\<\\/', '</', self._sub_list[i], re.UNICODE) i +=", "is loaded self._is_file_loaded = False #Path to the subtitles file if re.match(r'^.+\\.srt$', path_to_sub,", "self._sub_list[i] = re.sub(r' \\;', ';', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "i in fileToRead: tempList.append(i) fileToRead.close() return tempList #Write a file def _write_file(self, path_to_file,", "self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] def _clean_time_format(self, string): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', string): return string", "\\.', '.', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. 
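    # For example (illustrative line, not from the original file), the
    # substitutions above turn a hearing-impaired cue into plain dialogue:
    #
    #   >>> import re
    #   >>> line = "JOHN: (SIGHS) Leave me alone."
    #   >>> line = re.sub(r'\([^)]*\)', '', line)
    #   >>> line = re.sub(r'^[ \t]*[A-Z]+[ \t]*\:', '', line)
    #   >>> re.sub(r'^[ \t]+|[ \t]+$', '', line)
    #   'Leave me alone.'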
',", "for i in fileToRead: tempList.append(i.rstrip('\\n\\r')) else: for i in fileToRead: tempList.append(i) fileToRead.close() return", "self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i += 1 #Clean formatting #Remove blank lines", "_end, re.UNICODE) self._sub_list[i] = self._clean_time_format(_start) + \" --> \" + self._clean_time_format(_end) #Main function", "in timedelta _time_end = datetime.strptime('01/01/10 ' + re.sub(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f')", "blank if self._sub_list[0] == \"\": del self._sub_list[0] #Delete last line if blank if", "== \"+\": _time_start += _time_offset _time_end += _time_offset elif _sign == \"-\": _time_start", "if re.match(\"^.*[^ ]+\\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', ' ;', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^", "the formatting before saving the subtitles self._clean_formatting() #Write file self._write_file(self._path_to_sub, self._sub_list) def Offset(self,", "brackets split in two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\[',", "self._is_file_loaded = False #Path to the subtitles file if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub", "#Path to the subtitles file if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub = path_to_sub else:", "= re.sub(u'\\ufeff', '', self._sub_list[0], re.UNICODE) #Delete unnecessary lines i = 0 count =", "]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! ', self._sub_list[i], re.UNICODE) #Correct hyphen if re.match(\"^\\-[^", "== len(self._sub_list)-1: break elif (i+3) == len(self._sub_list)-1: break #If the third line is", "\"r\", \"utf-8\") except: logger.log(\"TidySub : File not encoded in UTF-8 neither in latin-1\",", "line if it is not before a number i = 0 while i", "\\t]+|[ \\t]+$', '', self._sub_list[i], re.UNICODE) #remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', '", "time range line format self._clean_time_range(i) #re add EOL self._sub_list[i] += '\\r\\n' i +=", "_write_file(self, path_to_file, toWrite): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\",", "re.sub(r'\\([^)]*\\)', '', self._sub_list[i], re.UNICODE) #remove parentheses split in two lines if i <", "= \"(\" + \")|(\".join(strings.get_teams()) + \")\" i = 0 while i < len(self._sub_list):", "time def _clean_formatting(self): #Remove unwanted blank lines self._clean_blank_lines() #Remove BOM character self._sub_list[0] =", "_time_end -= _time_offset #create the new time range line self._sub_list[i] = str(_time_start.hour) +", "removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language = \"\"): if not self._is_file_loaded: logger.log(\"TidySub : No", "if _count_french > _count_english: logger.log(\"TidySub : INFO: Guessed language is French\", logger.DEBUG) return", "re.match('^[0-9]+$', self._sub_list[i+5]): j += 3 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is", "_clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start =", "self._clean_time_range(i+1) j += 1 #Exception if last line if (i+1) == len(self._sub_list)-1: del", "]+\",self._sub_list[i], re.UNICODE): # self._sub_list[i] 
= re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE) #Correct question mark", "re.sub(r'^\\-', '- ', self._sub_list[i], re.UNICODE) #Correct not regular expressions self._sub_list[i] = re.sub(r'\\? \\!',", "self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+ \\?\",self._sub_list[i],", "or _ms < 0 or _ms >= 1000: logger.log(\"TidySub : Milisecond is not", "re.UNICODE) #Correct question mark if re.match(\"^.+[^ ]+\\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?', ' ?',", "re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! ', self._sub_list[i], re.UNICODE) #Correct hyphen if re.match(\"^\\-[^ ]\",self._sub_list[i],", "\"))\" + \"([ ]|$)\" _english = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_english(),True) +", "is a number if re.match('^[0-9]+$', self._sub_list[i]): #First line must always be 1 if", "line if blank if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] def _clean_time_format(self, string): if", "self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Correct comma if re.match(\"^.+ \\,\",self._sub_list[i],", "self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]', '', self._sub_list[i+1],", "self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.*[^ ]+\\;\",self._sub_list[i], re.UNICODE):", "re.UNICODE) #Extract start time and save in timedelta _time_start = datetime.strptime('01/01/10 ' +", "logger.DEBUG) return if re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(), re.UNICODE): path_to_sub = re.sub(r'\\.[a-z]+$', '', path_to_sub.lower()) return path_to_sub[len(path_to_sub)-2:len(path_to_sub)]", "#If the line is a number if re.match('^[0-9]+$', self._sub_list[i]): #First line must always", "a number i = 0 while i < len(self._sub_list)-1: if self._sub_list[i] == \"\"", "re.UNICODE) #remove parentheses split in two lines if i < (len(self._sub_list) - 1)", "and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and not re.match(r'\\)', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\)', self._sub_list[i+1], re.UNICODE):", "re.UNICODE) i += 1 #Remove music from line def _clean_music(self): i = 0", "self._sub_list[i] = str(count) #Exception if last line if i == len(self._sub_list)-1: del self._sub_list[len(self._sub_list)-1]", "is not \"\" and self._sub_list[i+4] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+5]): j +=", "_minute=0, _second=0, _ms=0): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\",", "re.UNICODE) and not re.match(r'\\}', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$',", "if correct_punctuation: if _language == \"fr\": logger.log(\"TidySub : INFO: Correcting punctuation (French)\", logger.DEBUG)", "(not isinstance(_minute, int)) or _minute < 0 or _minute >= 60: logger.log(\"TidySub :", "False if (not isinstance(_hour, int)) or _hour < 0 or _hour > 5:", "= string[0:9] + \"0\" + string[9:len(string)] return string #Try to correct the format", "if re.search(u'\\u266a', self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i += 1 #Clean formatting #Remove", "re.UNICODE) #remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], 
re.UNICODE) #Correct", "is not correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_ms, int))", "_ms >= 1000: logger.log(\"TidySub : Milisecond is not correct for offset\", logger.DEBUG) _correct", "'', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Extract end time and save in timedelta _time_end", "0 count = 1 while i < len(self._sub_list): j = 1 #If the", "re.UNICODE) and re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] =", "line must always be 1 if i == 0: self._sub_list[i] = str('1') count", "'%d/%m/%y %H:%M:%S,%f') #Extract end time and save in timedelta _time_end = datetime.strptime('01/01/10 '", "re.UNICODE): del self._sub_list[i] continue i += 1 #Clean Hi in the subtitles file", "re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+[^ ]+\\?\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "colon if re.match(\"^.* \\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\:', ':', self._sub_list[i], re.UNICODE) if", "logger.log(\"TidySub : INFO: Subtitles file saved\", logger.DEBUG) #Try to detect subtitles language def", "self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i += 1 #Clean Hi in the subtitles", "string = string[0:9] + \"0\" + string[9:len(string)] return string #Try to correct the", "self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+5]): j", "return _correct = True # Check consistency of the parameters if _sign is", "2 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] == \"\" and not re.match('^[0-9]+$',", "save in timedelta _time_start = datetime.strptime('01/01/10 ' + re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i],", "re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.*[^ ]+\\;\",self._sub_list[i],", "function self._clean_team() #If music strings must be removed if removeMusic: logger.log(\"TidySub : INFO:", "\\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\;', ';', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE):", "'\\r\\n' i += 1 #Remove unwanted blank lines in the subtitles file def", "len(self._sub_list): #remove parentheses and content self._sub_list[i] = re.sub(r'\\([^)]*\\)', '', self._sub_list[i], re.UNICODE) #remove parentheses", "+ \"))\" + \"([ ]|$)\" _count_french = 0 _count_english = 0 i =", "parentheses split in two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\(',", "self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return #Try to determine", "+ \"00\" + string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string = string[0:9] + \"0\" +", "self._sub_list[i+3] is not \"\" and self._sub_list[i+4] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+5]): del", "is not correct for offset\", logger.DEBUG) _correct = False if not _correct: return", "_count_english: logger.log(\"TidySub : INFO: Guessed language is French\", logger.DEBUG) return \"fr\" elif _count_english", "= re.sub(r'^.*\\}', '', self._sub_list[i+1], re.UNICODE) #remove name of speaker in front of the", "self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.*[^ ]+\\:\",self._sub_list[i], 
    #English: Try to correct punctuation in the subtitles file with regex
    def _clean_punctuation_en(self):
        i = 0
        while i < len(self._sub_list):
            line = self._sub_list[i]
            if not re.match(r'^[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+', line) and not re.match(r'^[0-9]+$', line):
                #Correct comma
                if re.match(r'^.+ \,', line, re.UNICODE):
                    line = re.sub(r' \,', ',', line, flags=re.UNICODE)
                if re.match(r'^.+\,[^ ]+', line, re.UNICODE):
                    line = re.sub(r'\,(?!\")', ', ', line, flags=re.UNICODE)
                #Correct semi-colon
                if re.match(r'^.* \;', line, re.UNICODE):
                    line = re.sub(r' \;', ';', line, flags=re.UNICODE)
                if re.match(r'^.*\;[^ ]+', line, re.UNICODE):
                    line = re.sub(r'\;', '; ', line, flags=re.UNICODE)
                #Correct colon
                if re.match(r'^.* \:', line, re.UNICODE):
                    line = re.sub(r' \:', ':', line, flags=re.UNICODE)
                if re.match(r'^.*\:[^ ]+', line, re.UNICODE):
                    line = re.sub(r'\:(?!\")(?![0-9]+)', ': ', line, flags=re.UNICODE)
                #Correct dots
                if re.match(r'^.+ \.', line, re.UNICODE):
                    line = re.sub(r' \.', '.', line, flags=re.UNICODE)
                if re.match(r'^.+\.[^ ]+', line, re.UNICODE):
                    line = re.sub(r'(?<=[A-Z]\.)\.(?!\")(?![A-Z]\.)', '. ', line, flags=re.UNICODE)
                #Correct question mark
                if re.match(r'^.+ \?', line, re.UNICODE):
                    line = re.sub(r' \?', '?', line, flags=re.UNICODE)
                if re.match(r'^.+\?[^ ]+', line, re.UNICODE):
                    line = re.sub(r'\?(?!\")', '? ', line, flags=re.UNICODE)
                #Correct exclamation mark
                if re.match(r'^.+ \!', line, re.UNICODE):
                    line = re.sub(r' \!', '!', line, flags=re.UNICODE)
                if re.match(r'^.+\![^ ]+', line, re.UNICODE):
                    line = re.sub(r'\!(?!\")', '! ', line, flags=re.UNICODE)
                #Correct hyphen
                if re.match(r'^\-[^ ]', line, re.UNICODE):
                    line = re.sub(r'^\-', '- ', line, flags=re.UNICODE)
                #remove leading and trailing spaces
                line = re.sub(r'^[ \t]+|[ \t]+$', '', line, flags=re.UNICODE)
                #remove multiple whitespaces
                line = re.sub(r'[ ]{2,}', ' ', line, flags=re.UNICODE)
                self._sub_list[i] = line
            i += 1
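    # The English rules normalise spacing around punctuation, e.g.
    # (illustrative line):
    #
    #   >>> import re
    #   >>> line = "Hey ,you !"
    #   >>> line = re.sub(r' \,', ',', line)        # 'Hey,you !'
    #   >>> line = re.sub(r'\,(?!\")', ', ', line)  # 'Hey, you !'
    #   >>> re.sub(r' \!', '!', line)
    #   'Hey, you!'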
    #Remove music from line
    def _clean_music(self):
        i = 0
        while i < len(self._sub_list):
            if re.search(u'\u266a', self._sub_list[i], re.UNICODE):
                del self._sub_list[i]
                continue
            i += 1

    #Clean formatting
    #Remove blank lines
    #Test numbers
    #Formatting of time
    def _clean_formatting(self):
        #Remove unwanted blank lines
        self._clean_blank_lines()
        #Remove BOM character
        self._sub_list[0] = re.sub(u'\ufeff', '', self._sub_list[0], flags=re.UNICODE)
        #Delete unnecessary lines
        i = 0
        count = 1
        while i < len(self._sub_list):
            j = 1
            #If the line is a number
            if re.match(r'^[0-9]+$', self._sub_list[i]):
                #First line must always be 1
                if i == 0:
                    self._sub_list[i] = str('1')
                    count = 1
                else:
                    self._sub_list[i] = str(count)
                #Exception if last line
                if i == len(self._sub_list)-1:
                    del self._sub_list[len(self._sub_list)-1]
                    if self._sub_list[len(self._sub_list)-1] == "":
                        del self._sub_list[len(self._sub_list)-1]
                    continue
                #Check if it's a time range
                if re.match(r'^[0-9]+\:[0-9]+\:[0-9]+\,[0-9]+', self._sub_list[i+1]):
                    self._clean_time_range(i+1)
                    j += 1
                    #Exception if last line
                    if (i+1) == len(self._sub_list)-1:
                        del self._sub_list[i+1]
                        continue
                    elif (i+2) == len(self._sub_list)-1:
                        break
                    elif (i+3) == len(self._sub_list)-1:
                        break
                    #If the third line is empty and 4th is a number again
                    if self._sub_list[i+2] == "" and re.match(r'^[0-9]+$', self._sub_list[i+3]):
                        del self._sub_list[i]
                        del self._sub_list[i]
                        del self._sub_list[i]
                        continue
                    elif self._sub_list[i+2] == "" and not re.match(r'^[0-9]+$', self._sub_list[i+3]):
                        del self._sub_list[i+2]
                        continue
                    #if 3rd line is not empty
                    elif self._sub_list[i+3] == "" and not re.match(r'^[0-9]+$', self._sub_list[i+4]):
                        del self._sub_list[i+3]
                        continue
                    elif self._sub_list[i+3] == "" and re.match(r'^[0-9]+$', self._sub_list[i+4]):
                        j += 2
                    elif self._sub_list[i+3] != "" and self._sub_list[i+4] == "" and not re.match(r'^[0-9]+$', self._sub_list[i+5]):
                        del self._sub_list[i+4]
                        continue
                    elif self._sub_list[i+3] != "" and self._sub_list[i+4] != "" and re.match(r'^[0-9]+$', self._sub_list[i+5]):
                        j += 3
                    elif self._sub_list[i+3] != "" and self._sub_list[i+4] != "" and self._sub_list[i+5] != "" and re.match(r'^[0-9]+$', self._sub_list[i+6]):
                        j += 4
                    count += 1
                else:
                    logger.log("TidySub : Formatting error : timerange", logger.DEBUG)
            else:
                logger.log("TidySub : Formatting error : number line", logger.DEBUG)
            i += j
        #Re-add the EOL stripped at load time
        i = 0
        while i < len(self._sub_list)-1:
            self._sub_list[i] += '\r\n'
            i += 1
re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "subtitles file was loaded\", logger.DEBUG) return #Try to determine the language of the", "the line is a number if re.match('^[0-9]+$', self._sub_list[i]): #First line must always be", "string[3:len(string)] #correct seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string = string[0:6] + \"0\" + string[6:len(string)]", "split in two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\(', self._sub_list[i],", "and not re.match(r'\\)', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\)', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\(.*$', '',", "self._sub_list[i] = re.sub(r'\\:', ' :', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "str(_time_end.hour) + \":\" + str(_time_end.minute) + \":\" + str(_time_end.second) + \",\" + str(_time_end.microsecond/1000)", "re.UNICODE) #Correct exclamation mark if re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\!', '!',", "mark if re.match(\"^.+[^ ]+\\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?', ' ?', self._sub_list[i], re.UNICODE) if", "a number again if self._sub_list[i+2] == \"\" and re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i] del", "occurences of the words for each language while i < len(self._sub_list): if re.search(_french,", "logger.log(\"TidySub : INFO: Removing lyrics\", logger.DEBUG) self._clean_music() #If Hi must be removed if", "or _second < 0 or _second >= 60: logger.log(\"TidySub : Second is not", "force_language #If the team strings must be removed if removeTeam: logger.log(\"TidySub : INFO:", "logger.DEBUG) return #combine words into one regex string _french = \"(^|[ ])\" +", "_time_start += _time_offset _time_end += _time_offset elif _sign == \"-\": _time_start -= _time_offset", "from sickbeard import logger #Definition of the TidySub class class TidySub: \"\"\"Load the", "string[0:6] + \"0\" + string[6:len(string)] #correct ms if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{1}$', string): string = string[0:9]", "= list() #Load the subtitles file self._sub_list = self._load_file(self._path_to_sub, True) if self._sub_list is", ": INFO: Removing teams names\", logger.DEBUG) #Call the function self._clean_team() #If music strings", "and content self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE) #remove brackets split in two", "self._sub_list[0] == \"\": del self._sub_list[0] #Delete last line if blank if self._sub_list[len(self._sub_list)-1] ==", "# Count the number of occurences of the words for each language while", "while i < len(self._sub_list): if re.search(combined, self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i +=", "self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\!',", "< len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]): #remove leading and", "is a number again if self._sub_list[i+2] == \"\" and re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i]", "== \"\" and not re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i+2] continue #if 3rd line is", "force_language: _language = self._detect_language(self._path_to_sub) else: _language = force_language #If the team strings must", "multiple whitespaces 
self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #remove space before", "just a single hyphen self._sub_list[i] = re.sub(r'^\\-$', '', self._sub_list[i], re.UNICODE) #delete empty balise", "isinstance(_hour, int)) or _hour < 0 or _hour > 5: logger.log(\"TidySub : Hour", "+ str(_time_start.second) + \",\" + str(_time_start.microsecond/1000) + \" --> \" + \\ str(_time_end.hour)", "'', self._sub_list[i], re.UNICODE) #remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i],", "';', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i],", "#Write a file def _write_file(self, path_to_file, toWrite): if not self._is_file_loaded: logger.log(\"TidySub : No", "i = 0 while i < len(self._sub_list): #remove parentheses and content self._sub_list[i] =", "\\.', '..', self._sub_list[i], re.UNICODE) #remove leading and trailing spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[", "i = 0 count = 1 while i < len(self._sub_list): j = 1", "< len(self._sub_list)-1: if self._sub_list[i] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+1]): del self._sub_list[i] continue", "a time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j += 1 #Exception if last", "'. ', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+[^ ]+\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "1 #Return the language which has the highest count if _count_french > _count_english:", "self._sub_list[i+3]): del self._sub_list[i] del self._sub_list[i] del self._sub_list[i] continue elif self._sub_list[i+2] == \"\" and", "self._sub_list[i+3]): del self._sub_list[i+2] continue #if 3rd line is not empty elif self._sub_list[i+3] ==", "_sign == \"+\": _time_start += _time_offset _time_end += _time_offset elif _sign == \"-\":", "not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return if re.match(\"^.+\\.[a-z]{2}\\.srt$\",", "while i < len(self._sub_list)-1: self._sub_list[i] += '\\r\\n' i += 1 #Remove unwanted blank", "', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.*[^ ]+\\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;',", "be corrected if correct_punctuation: if _language == \"fr\": logger.log(\"TidySub : INFO: Correcting punctuation", "if it is not before a number i = 0 while i <", "= re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #remove space before closing balise if", "content self._sub_list[i] = re.sub(r'\\([^)]*\\)', '', self._sub_list[i], re.UNICODE) #remove parentheses split in two lines", "re.match(\"^.*[^ ]+\\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', ' ;', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i],", "continue elif (i+2) == len(self._sub_list)-1: break elif (i+3) == len(self._sub_list)-1: break #If the", "and formatting actions\"\"\" def __init__(self, path_to_sub): #Boolean to stock if file is loaded", "re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i], re.UNICODE) #remove leading and trailing spaces self._sub_list[i] =", "not correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_ms, int)) or", "self._sub_list[0] = re.sub(u'\\ufeff', '', self._sub_list[0], re.UNICODE) #Delete unnecessary lines i = 0 count", "correct_punctuation=False, force_language = \"\"): if not self._is_file_loaded: 
logger.log(\"TidySub : No subtitles file was", "_correct = False if (not isinstance(_minute, int)) or _minute < 0 or _minute", "re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE) self._sub_list[i] = self._clean_time_format(_start) + \" --> \" + self._clean_time_format(_end) #Main", "\\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\,', ',', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE):", "re.match('^[0-9]+$', self._sub_list[i+5]): del self._sub_list[i+4] continue elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is", "self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.* \\:\",self._sub_list[i], re.UNICODE):", "re.UNICODE): _count_english += 1 i += 1 #Return the language which has the", "multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Remove line with", "if re.search(combined, self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i += 1 #Clean Hi in", "re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. ', self._sub_list[i], re.UNICODE) #Correct exclamation mark", "]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.*", "with regex def _clean_punctuation_en(self): i = 0 while i < len(self._sub_list): if not", "1 #French: Try to correct punctuation in the subtitles file with regex def", "#Calculate the new time if _sign == \"+\": _time_start += _time_offset _time_end +=", "line def _clean_music(self): i = 0 while i < len(self._sub_list): if re.search(u'\\u266a', self._sub_list[i],", "self._sub_list[i], re.UNICODE) #delete empty balise self._sub_list[i] = re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i], re.UNICODE)", "minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string = string[0:3] + \"0\" + string[3:len(string)] #correct seconds", "logger.log(\"TidySub : INFO: Guessed language is English\", logger.DEBUG) return \"en\" else: return \"undefined\"", "string): string = re.sub(r'^', '0', string, re.UNICODE) #correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string", "_english = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_english(),True) + \"))\" + \"([ ]|$)\"", "Hour is not correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_minute,", "]{2,}', ' ', self._sub_list[i], re.UNICODE) i += 1 #Remove music from line def", "#remove parentheses and content self._sub_list[i] = re.sub(r'\\([^)]*\\)', '', self._sub_list[i], re.UNICODE) #remove parentheses split", "if self._sub_list[i] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+1]): del self._sub_list[i] continue i +=", "\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\!', '!', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE):", "blank if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] def _clean_time_format(self, string): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', string):", "= re.sub(r'\\!(?!\\\")', '! 
    #Main function to clean subtitles
    def Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language=""):
        if not self._is_file_loaded:
            logger.log("TidySub : No subtitles file was loaded", logger.DEBUG)
            return
        #Try to determine the language of the file
        if not force_language:
            _language = self._detect_language(self._path_to_sub)
        else:
            _language = force_language
        #If the team strings must be removed
        if removeTeam:
            logger.log("TidySub : INFO: Removing teams names", logger.DEBUG)
            #Call the function
            self._clean_team()
        #If music strings must be removed
        if removeMusic:
            logger.log("TidySub : INFO: Removing lyrics", logger.DEBUG)
            self._clean_music()
        #If Hi must be removed
        if removeHi:
            logger.log("TidySub : INFO: Removing HI", logger.DEBUG)
            self._clean_hi()
        #If punctuation must be corrected
        if correct_punctuation:
            if _language == "fr":
                logger.log("TidySub : INFO: Correcting punctuation (French)", logger.DEBUG)
                self._clean_punctuation_fr()
            elif _language == "en":
                logger.log("TidySub : INFO: Correcting punctuation (English)", logger.DEBUG)
                self._clean_punctuation_en()
        #Clean the formatting before saving the subtitles
        self._clean_formatting()
        #Write file
        self._write_file(self._path_to_sub, self._sub_list)
self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]): #remove leading and trailing", "del self._sub_list[i+2] continue #if 3rd line is not empty elif self._sub_list[i+3] == \"\"", "re.sub(r' \\<\\/', '</', self._sub_list[i], re.UNICODE) i += 1 #English: Try to correct punctuation", "int)) or _hour < 0 or _hour > 5: logger.log(\"TidySub : Hour is", "\":\" + str(_time_start.minute) + \":\" + str(_time_start.second) + \",\" + str(_time_start.microsecond/1000) + \"", "elif _language == \"en\": logger.log(\"TidySub : INFO: Correcting punctuation (English)\", logger.DEBUG) self._clean_punctuation_en() #Clean", "#combine words into one regex string _french = \"(^|[ ])\" + \"((\" +", "timerange\", logger.DEBUG) else: logger.log(\"TidySub : Formatting error : number line\", logger.DEBUG) i +=", "isinstance(_ms, int)) or _ms < 0 or _ms >= 1000: logger.log(\"TidySub : Milisecond", "self._sub_list[i], re.UNICODE) #remove brackets split in two lines if i < (len(self._sub_list) -", "#remove braces and content self._sub_list[i] = re.sub(r'\\{[^)]*\\}', '', self._sub_list[i], re.UNICODE) #remove braces split", "re.UNICODE): self._sub_list[i] = re.sub(r' \\,', ',', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "self._sub_list[i], re.UNICODE) if re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE)", "#Clean the formatting before saving the subtitles self._clean_formatting() #Write file self._write_file(self._path_to_sub, self._sub_list) def", "\\.', '.', self._sub_list[i], re.UNICODE) #if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): # self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '.", "is not \"\" and self._sub_list[i+5] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+6]): j +=", "\"\" and self._sub_list[i+5] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+6]): j += 4 count", "INFO: Removing HI\", logger.DEBUG) self._clean_hi() #If punctuation must be corrected if correct_punctuation: if", "in front of the line self._sub_list[i] = re.sub(r'^[ \\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i], re.UNICODE)", "re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> [0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE)", "if re.match(\"^.+\\,[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon", "file self._sub_list = self._load_file(self._path_to_sub, True) if self._sub_list is not None: logger.log(\"TidySub : INFO:", "exclamation mark if re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\!', '!', self._sub_list[i], re.UNICODE)", "i < (len(self._sub_list) - 1) and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and not re.match(r'\\)', self._sub_list[i],", "+ str(_time_start.microsecond/1000) + \" --> \" + \\ str(_time_end.hour) + \":\" + str(_time_end.minute)", "self._sub_list[i], re.UNICODE) and re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1]", "]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i], re.UNICODE) i += 1 #French: Try to correct 
punctuation", "i < len(self._sub_list): j = 1 #If the line is a number if", "self._sub_list[0] #Delete last line if blank if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] def", "#remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Remove line", "', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.* \\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'", "re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end = re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE) _end", "+ self._clean_time_format(_end) #Main function to clean subtitles def Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False,", "= self._clean_time_format(_start) + \" --> \" + self._clean_time_format(_end) #Main function to clean subtitles", "-= _time_offset _time_end -= _time_offset #create the new time range line self._sub_list[i] =", "re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(), re.UNICODE): path_to_sub = re.sub(r'\\.[a-z]+$', '', path_to_sub.lower()) return path_to_sub[len(path_to_sub)-2:len(path_to_sub)] else: return self._guess_language()", "self._sub_list[i], re.UNICODE) and re.match(r'^.*\\]', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1]", "and not re.match(r'^[0-9]+$', self._sub_list[i]): #remove leading and trailing spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[", "# self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+[^", "' ', self._sub_list[i], re.UNICODE) #Remove line with just a single hyphen self._sub_list[i] =", "= \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_english(),True) + \"))\" + \"([ ]|$)\" _count_french", "self._sub_list[i] = re.sub(r'\\?', ' ?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "self._sub_list[i] = re.sub(r' \\<\\/', '</', self._sub_list[i], re.UNICODE) i += 1 #English: Try to", "re.UNICODE) #Correct colon if re.match(\"^.* \\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\:', ':', self._sub_list[i],", "== \"\" and not re.match('^[0-9]+$', self._sub_list[i+4]): del self._sub_list[i+3] continue elif self._sub_list[i+3] == \"\"", "(len(self._sub_list) - 1) and re.match(r'^.*\\[', self._sub_list[i], re.UNICODE) and not re.match(r'\\]', self._sub_list[i], re.UNICODE) and", "not encoded in UTF-8 neither in latin-1\", logger.DEBUG) return return tempList = list", "self._sub_list = list() #Load the subtitles file self._sub_list = self._load_file(self._path_to_sub, True) if self._sub_list", "re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.*[^ ]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "language is English\", logger.DEBUG) return \"en\" else: return \"undefined\" #Test Regex for team", "removeTeam: logger.log(\"TidySub : INFO: Removing teams names\", logger.DEBUG) #Call the function self._clean_team() #If", "re.search(_french, self._sub_list[i].lower(), re.UNICODE): _count_french += 1 if re.search(_english, self._sub_list[i].lower(), re.UNICODE): _count_english += 1", "self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.*[^ 
]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:', ' :',", "i < len(self._sub_list): if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): #remove EOL self._sub_list[i] = re.sub(r'\\r\\n$', '', self._sub_list[i],", "a blank line if it is not before a number i = 0", "' ?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. ',", "balise self._sub_list[i] = re.sub(r'\\<[^ ]+\\>\\<\\/[^ ]+\\>', '', self._sub_list[i], re.UNICODE) i += 1 #French:", "not re.match(r'^[0-9]+$', self._sub_list[i]): #remove leading and trailing spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[ \\t]+$',", "#correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string = string[0:3] + \"0\" + string[3:len(string)] #correct", "removeHi: logger.log(\"TidySub : INFO: Removing HI\", logger.DEBUG) self._clean_hi() #If punctuation must be corrected", "re.UNICODE): self._sub_list[i] = re.sub(r' \\;', ';', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i]", "datetime import datetime from regex import strings from sickbeard import logger #Definition of", "second line #Check if it's a time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j", "file with regex def _clean_hi(self): i = 0 while i < len(self._sub_list): #remove", "i == len(self._sub_list)-1: del self._sub_list[len(self._sub_list)-1] if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] break #Check", "_clean_blank_lines(self): #Remove a blank line if it is not before a number i", "(len(self._sub_list) - 1) and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and not re.match(r'\\)', self._sub_list[i], re.UNICODE) and", "if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): #remove EOL self._sub_list[i] = re.sub(r'\\r\\n$', '', self._sub_list[i], re.UNICODE) #Extract start", "#Correct question mark if re.match(\"^.+ \\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\?', '?', self._sub_list[i],", "re.match(r'\\]', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\]', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE)", "removed if removeEOL: for i in fileToRead: tempList.append(i.rstrip('\\n\\r')) else: for i in fileToRead:", "0 or _minute >= 60: logger.log(\"TidySub : Minute is not correct for offset\",", "not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return #Try to", "<reponame>Branlala/docker-sickbeardfr<filename>sickbeard/lib/tidysub/cleaner.py #!/usr/bin/env python # -*- coding: utf-8 -*- import codecs import re from", "_language = self._detect_language(self._path_to_sub) else: _language = force_language #If the team strings must be", "to offset into a timedelta _time_offset = timedelta(hours=_hour, minutes=_minute, seconds=_second, microseconds=(_ms*1000)) i =", "in mkv\", logger.DEBUG) return except: try: fileToRead = codecs.open(path_to_file, \"r\", \"utf-8\") except: logger.log(\"TidySub", "re.UNICODE) and re.match(r'^.*\\]', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\[.*$', '', self._sub_list[i], re.UNICODE) self._sub_list[i+1] =", "for i in fileToRead: tempList.append(i) fileToRead.close() return tempList #Write a 
file def _write_file(self,", "string[9:len(string)] return string #Try to correct the format of the time def _clean_time_range(self,", "re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): # self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. ', self._sub_list[i], re.UNICODE) #Correct question", "self._sub_list[i], re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\]', '', self._sub_list[i+1], re.UNICODE) #remove braces and content self._sub_list[i]", "self._sub_list[i]): #remove EOL self._sub_list[i] = re.sub(r'\\r\\n$', '', self._sub_list[i], re.UNICODE) #Extract start time and", "INFO: Correcting punctuation (French)\", logger.DEBUG) self._clean_punctuation_fr() elif _language == \"en\": logger.log(\"TidySub : INFO:", "', self._sub_list[i], re.UNICODE) #Correct comma if re.match(\"^.+ \\,\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\,',", "i = 0 while i < len(self._sub_list): if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): #remove EOL self._sub_list[i]", "0 while i < len(self._sub_list): if re.search(combined, self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i", "self._sub_list[i] = str('1') count = 1 else: self._sub_list[i] = str(count) #Exception if last", "timedelta from datetime import datetime from regex import strings from sickbeard import logger", "if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return fileDest", "formatting before saving the subtitles self._clean_formatting() #Write file self._write_file(self._path_to_sub, self._sub_list) def Offset(self, _sign,", "re.sub(r'\\!', ' !', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '!", "lines i = 0 count = 1 while i < len(self._sub_list): j =", "re.UNICODE) #remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #remove", "\"00\" + string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string = string[0:9] + \"0\" + string[9:len(string)]", "\"undefined\" #Test Regex for team words def _clean_team(self): #combine team names into one", "to correct punctuation in the subtitles file with regex def _clean_punctuation_en(self): i =", "return self._guess_language() def _guess_language(self): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was", "before saving the subtitles self._clean_formatting() #Write file self._write_file(self._path_to_sub, self._sub_list) def Offset(self, _sign, _hour=0,", "re.UNICODE) self._sub_list[i] = re.sub(r'\\? \\? \\?', '???', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. 
\\.", "INFO: Subtitles file saved\", logger.DEBUG) #Try to detect subtitles language def _detect_language(self, path_to_sub):", "#Try to determine the language of the file if not force_language: _language =", "re.UNICODE) and not re.match(r'\\)', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\)', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\(.*$',", "== \"\": del self._sub_list[len(self._sub_list)-1] def _clean_time_format(self, string): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', string): return string else:", "re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and not re.match(r'\\)', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\)', self._sub_list[i+1], re.UNICODE): self._sub_list[i]", "corrected if correct_punctuation: if _language == \"fr\": logger.log(\"TidySub : INFO: Correcting punctuation (French)\",", "line is not empty elif self._sub_list[i+3] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+4]): del", "#Extract start time and save in timedelta _time_start = datetime.strptime('01/01/10 ' + re.sub(r'", "if re.match(\"^.+[^ ]+\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!', ' !', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^", "determine the language of the file if not force_language: _language = self._detect_language(self._path_to_sub) else:", "datetime.strptime('01/01/10 ' + re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Extract end", "INFO: Correcting punctuation (English)\", logger.DEBUG) self._clean_punctuation_en() #Clean the formatting before saving the subtitles", ">= 60: logger.log(\"TidySub : Minute is not correct for offset\", logger.DEBUG) _correct =", "#Re add the EOL character i = 0 while i < len(self._sub_list)-1: self._sub_list[i]", "i < (len(self._sub_list) - 1) and re.match(r'^.*\\[', self._sub_list[i], re.UNICODE) and not re.match(r'\\]', self._sub_list[i],", "empty and 4th is a number again if self._sub_list[i+2] == \"\" and re.match('^[0-9]+$',", ":', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ', self._sub_list[i],", "= 1 else: self._sub_list[i] = str(count) #Exception if last line if i ==", "self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ', self._sub_list[i], re.UNICODE) #Correct dots if re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE):", "elif (i+2) == len(self._sub_list)-1: break elif (i+3) == len(self._sub_list)-1: break #If the third", "self._sub_list[len(self._sub_list)-1] break #Check the second line #Check if it's a time range if", "split in two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\[', self._sub_list[i],", "in the subtitles file with regex def _clean_punctuation_en(self): i = 0 while i", "4th is a number again if self._sub_list[i+2] == \"\" and re.match('^[0-9]+$', self._sub_list[i+3]): del", "team words def _clean_team(self): #combine team names into one regex string combined =", "one regex string _french = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_french(),True) + \"))\"", "subtitles file with regex def _clean_punctuation_en(self): i = 0 while i < len(self._sub_list):", "0 i = 0 # Count the number of occurences of the words", "class TidySub: \"\"\"Load the subtitle, the file containing regex for removal and perform", "of the time def _clean_time_range(self, i): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3} \\-\\-\\> 
[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if", "_second < 0 or _second >= 60: logger.log(\"TidySub : Second is not correct", "= re.sub(r'^.*\\)', '', self._sub_list[i+1], re.UNICODE) #remove brackets and content self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '',", "add EOL self._sub_list[i] += '\\r\\n' i += 1 #Write the new SRT file", "= re.sub(r'\\?', ' ?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")',", "= self._detect_language(self._path_to_sub) else: _language = force_language #If the team strings must be removed", "regex string _french = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_french(),True) + \"))\" +", "logger.log(\"TidySub : Milisecond is not correct for offset\", logger.DEBUG) _correct = False if", "\\?', '?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. ',", "language while i < len(self._sub_list): if re.search(_french, self._sub_list[i].lower(), re.UNICODE): _count_french += 1 if", "logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return #combine words into one", "save in timedelta _time_end = datetime.strptime('01/01/10 ' + re.sub(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s', '', self._sub_list[i], re.UNICODE), '%d/%m/%y", "return False #Save time to offset into a timedelta _time_offset = timedelta(hours=_hour, minutes=_minute,", "is not \"+\" and _sign is not \"-\": logger.log(\"TidySub : Bad sign for", "if i == len(self._sub_list)-1: del self._sub_list[len(self._sub_list)-1] if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] break", "= re.sub(r'^\\-', '- ', self._sub_list[i], re.UNICODE) #Correct not regular expressions self._sub_list[i] = re.sub(r'\\?", "_hour > 5: logger.log(\"TidySub : Hour is not correct for offset\", logger.DEBUG) _correct", "' :', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ',", "logger.DEBUG) _correct = False if not _correct: return False #Save time to offset", "== \"-\": _time_start -= _time_offset _time_end -= _time_offset #create the new time range", "file if re.match(r'^.+\\.srt$', path_to_sub, re.UNICODE): self._path_to_sub = path_to_sub else: logger.log(\"TidySub : TidySub only", "EOL character i = 0 while i < len(self._sub_list)-1: self._sub_list[i] += '\\r\\n' i", "end time and save in timedelta _time_end = datetime.strptime('01/01/10 ' + re.sub(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s', '',", "1 if re.search(_english, self._sub_list[i].lower(), re.UNICODE): _count_english += 1 i += 1 #Return the", "text file into a list in utf8 def _load_file(self, path_to_file, removeEOL=False): try: fileToRead", "]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.*", "re.UNICODE) #correct minutes if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string = string[0:3] + \"0\" + string[3:len(string)]", "re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '. 
', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+", "i < (len(self._sub_list) - 1) and re.match(r'^.*\\{', self._sub_list[i], re.UNICODE) and not re.match(r'\\}', self._sub_list[i],", "< len(self._sub_list): if re.search(u'\\u266a', self._sub_list[i], re.UNICODE): del self._sub_list[i] continue i += 1 #Clean", "_time_start = datetime.strptime('01/01/10 ' + re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f')", "', self._sub_list[i], re.UNICODE) #Correct colon if re.match(\"^.*[^ ]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:', '", "[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', self._sub_list[i]): return if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]): _start = re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end =", "\"\" and re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i] del self._sub_list[i] del self._sub_list[i] continue elif self._sub_list[i+2]", "j += 3 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is not \"\"", "and 4th is a number again if self._sub_list[i+2] == \"\" and re.match('^[0-9]+$', self._sub_list[i+3]):", "while i < len(self._sub_list): j = 1 #If the line is a number", "if re.search(_french, self._sub_list[i].lower(), re.UNICODE): _count_french += 1 if re.search(_english, self._sub_list[i].lower(), re.UNICODE): _count_english +=", "into one regex string combined = \"(\" + \")|(\".join(strings.get_teams()) + \")\" i =", "', self._sub_list[i], re.UNICODE) #Correct exclamation mark if re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'", "clean subtitles def Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language = \"\"): if not", "mark if re.match(\"^.+ \\?\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\?', '?', self._sub_list[i], re.UNICODE) if", "be removed if removeMusic: logger.log(\"TidySub : INFO: Removing lyrics\", logger.DEBUG) self._clean_music() #If Hi", "latin-1\", logger.DEBUG) return return tempList = list () self._is_file_loaded = True #If the", "logger.log(\"TidySub : Formatting error : number line\", logger.DEBUG) i += j #Re add", ";', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ', self._sub_list[i],", "not \"\" and re.match('^[0-9]+$', self._sub_list[i+6]): j += 4 count += 1 else: logger.log(\"TidySub", "subtitles file self._sub_list = self._load_file(self._path_to_sub, True) if self._sub_list is not None: logger.log(\"TidySub :", "re.UNICODE) #Correct not regular expressions self._sub_list[i] = re.sub(r'\\? 
\\!', '?!', self._sub_list[i], re.UNICODE) self._sub_list[i]", "'', self._sub_list[i], re.UNICODE) #remove braces split in two lines if i < (len(self._sub_list)", "Correcting punctuation (French)\", logger.DEBUG) self._clean_punctuation_fr() elif _language == \"en\": logger.log(\"TidySub : INFO: Correcting", "continue elif self._sub_list[i+2] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i+2] continue #if", "not \"\" and self._sub_list[i+5] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+6]): j += 4", "self._sub_list[i], re.UNICODE) and not re.match(r'\\)', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\)', self._sub_list[i+1], re.UNICODE): self._sub_list[i] =", "_correct = False if (not isinstance(_second, int)) or _second < 0 or _second", "def _write_file(self, path_to_file, toWrite): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was", "if re.match(r'^[0-9]{2}\\:[0-9]{1}\\:', string): string = string[0:3] + \"0\" + string[3:len(string)] #correct seconds if", "= 1 #If the line is a number if re.match('^[0-9]+$', self._sub_list[i]): #First line", "is French\", logger.DEBUG) return \"fr\" elif _count_english > _count_french: logger.log(\"TidySub : INFO: Guessed", "TidySub: \"\"\"Load the subtitle, the file containing regex for removal and perform the", "if blank if self._sub_list[0] == \"\": del self._sub_list[0] #Delete last line if blank", "re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$', string): return string else: #correct hours if re.match(r'^[0-9]{1}\\:', string): string = re.sub(r'^',", "i == 0: self._sub_list[i] = str('1') count = 1 else: self._sub_list[i] = str(count)", "elif _count_english > _count_french: logger.log(\"TidySub : INFO: Guessed language is English\", logger.DEBUG) return", "self._sub_list[i], re.UNICODE) i += 1 #French: Try to correct punctuation in the subtitles", "is not before a number i = 0 while i < len(self._sub_list)-1: if", "unnecessary lines i = 0 count = 1 while i < len(self._sub_list): j", "1 #Exception if last line if (i+1) == len(self._sub_list)-1: del self._sub_list[i+1] continue elif", "re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. 
', self._sub_list[i], re.UNICODE) #Correct question mark", "except: try: fileToRead = codecs.open(path_to_file, \"r\", \"utf-8\") except: logger.log(\"TidySub : File not encoded", "not re.match('^[0-9]+$', self._sub_list[i+4]): del self._sub_list[i+3] continue elif self._sub_list[i+3] == \"\" and re.match('^[0-9]+$', self._sub_list[i+4]):", "was loaded\", logger.DEBUG) return #combine words into one regex string _french = \"(^|[", "_clean_music(self): i = 0 while i < len(self._sub_list): if re.search(u'\\u266a', self._sub_list[i], re.UNICODE): del", "re.match('^[0-9]+$', self._sub_list[i+3]): del self._sub_list[i] del self._sub_list[i] del self._sub_list[i] continue elif self._sub_list[i+2] == \"\"", "== \"\": del self._sub_list[len(self._sub_list)-1] break #Check the second line #Check if it's a", "_clean_punctuation_fr(self): i = 0 while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and", "self._sub_list[i], re.UNICODE) #Correct dots if re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.',", "_end = re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE) _end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end, re.UNICODE) self._sub_list[i] = self._clean_time_format(_start)", "= re.sub(\"\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$\",'', self._sub_list[i], re.UNICODE) _end = re.sub(r'\\r\\n','', self._sub_list[i], re.UNICODE) _end = re.sub(\"^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s\",'', _end,", "logger.DEBUG) return \"fr\" elif _count_english > _count_french: logger.log(\"TidySub : INFO: Guessed language is", "= re.sub(r' \\<\\/', '</', self._sub_list[i], re.UNICODE) i += 1 #English: Try to correct", "_correct = False if (not isinstance(_hour, int)) or _hour < 0 or _hour", "(i+2) == len(self._sub_list)-1: break elif (i+3) == len(self._sub_list)-1: break #If the third line", "\\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Extract end time and save in", "'???', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\. \\.', '...', self._sub_list[i], re.UNICODE) self._sub_list[i] =", "self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) #if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): # self._sub_list[i]", "re.UNICODE) self._sub_list[i+1] = re.sub(r'^.*\\}', '', self._sub_list[i+1], re.UNICODE) #remove name of speaker in front", "loaded\", logger.DEBUG) return _correct = True # Check consistency of the parameters if", "'!', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!(?!\\\")', '! 
', self._sub_list[i],", "#If punctuation must be corrected if correct_punctuation: if _language == \"fr\": logger.log(\"TidySub :", "into one regex string _french = \"(^|[ ])\" + \"((\" + \")|(\".join(strings.get_guess_french(),True) +", "(not isinstance(_hour, int)) or _hour < 0 or _hour > 5: logger.log(\"TidySub :", "re.UNICODE) #remove space before closing balise if re.search(r' \\<\\/[^ ]+\\>',self._sub_list[i], re.UNICODE): self._sub_list[i] =", "regex def _clean_punctuation_en(self): i = 0 while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+',", "self._sub_list[i+1], re.UNICODE) #remove name of speaker in front of the line self._sub_list[i] =", "if re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\.[^", "#remove leading and trailing spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[ \\t]+$', '', self._sub_list[i], re.UNICODE)", "in the subtitles file with regex def _clean_hi(self): i = 0 while i", "= re.sub(r' \\?', '?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")',", "timedelta(hours=_hour, minutes=_minute, seconds=_second, microseconds=(_ms*1000)) i = 0 while i < len(self._sub_list): if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+\\s\\-\\-\\>\\s[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+',", "--> \" + \\ str(_time_end.hour) + \":\" + str(_time_end.minute) + \":\" + str(_time_end.second)", "the subtitles file with regex def _clean_punctuation_en(self): i = 0 while i <", "\\!', '?!', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\? \\? \\?', '???', self._sub_list[i], re.UNICODE) self._sub_list[i]", "Removing HI\", logger.DEBUG) self._clean_hi() #If punctuation must be corrected if correct_punctuation: if _language", "except: logger.log(\"TidySub : File not encoded in UTF-8 neither in latin-1\", logger.DEBUG) return", "\\t]*[A-Z]+[ \\t]*\\:', '', self._sub_list[i], re.UNICODE) #remove leading and trailing spaces self._sub_list[i] = re.sub(r'^[", "1st line if blank if self._sub_list[0] == \"\": del self._sub_list[0] #Delete last line", "+= 1 #Clean Hi in the subtitles file with regex def _clean_hi(self): i", "#Correct colon if re.match(\"^.*[^ ]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:', ' :', self._sub_list[i], re.UNICODE)", "offset\", logger.DEBUG) _correct = False if not _correct: return False #Save time to", "_minute >= 60: logger.log(\"TidySub : Minute is not correct for offset\", logger.DEBUG) _correct", "re.sub(r'\\;', ' ;', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', ';", "= string[0:3] + \"0\" + string[3:len(string)] #correct seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string =", "#Remove line with just a single hyphen self._sub_list[i] = re.sub(r'^\\-$', '', self._sub_list[i], re.UNICODE)", "self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) i += 1 #Remove music", "i < len(self._sub_list)-1: self._sub_list[i] += '\\r\\n' i += 1 #Remove unwanted blank lines", "if removeMusic: logger.log(\"TidySub : INFO: Removing lyrics\", logger.DEBUG) self._clean_music() #If Hi must be", "str(count) #Exception if last line if i == len(self._sub_list)-1: del self._sub_list[len(self._sub_list)-1] if self._sub_list[len(self._sub_list)-1]", "No subtitles file was loaded\", logger.DEBUG) return fileDest = 
codecs.open(path_to_file, \"w\", \"latin-1\") for", "self._sub_list[i+5]): del self._sub_list[i+4] continue elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is not", "re.UNICODE) #Correct hyphen if re.match(\"^\\-[^ ]\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'^\\-', '- ', self._sub_list[i],", "re.match(\"^.+ \\.\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) #if re.match(\"^.+\\.[^ ]+\",self._sub_list[i],", "+ \")\" i = 0 while i < len(self._sub_list): if re.search(combined, self._sub_list[i], re.UNICODE):", "re.sub(r' \\?', '?', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\?[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\?(?!\\\")', '.", "not re.match(r'\\}', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE): self._sub_list[i] = re.sub(r'\\{.*$', '', self._sub_list[i],", "= str(_time_start.hour) + \":\" + str(_time_start.minute) + \":\" + str(_time_start.second) + \",\" +", "multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) i += 1", "#correct hours if re.match(r'^[0-9]{1}\\:', string): string = re.sub(r'^', '0', string, re.UNICODE) #correct minutes", "_correct = True # Check consistency of the parameters if _sign is not", "BOM character self._sub_list[0] = re.sub(u'\\ufeff', '', self._sub_list[0], re.UNICODE) #Delete unnecessary lines i =", "0 or _second >= 60: logger.log(\"TidySub : Second is not correct for offset\",", "hyphen self._sub_list[i] = re.sub(r'^\\-$', '', self._sub_list[i], re.UNICODE) #delete empty balise self._sub_list[i] = re.sub(r'\\<[^", "the new time if _sign == \"+\": _time_start += _time_offset _time_end += _time_offset", "re.UNICODE) #remove multiple whitespaces self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Remove", ": Formatting error : number line\", logger.DEBUG) i += j #Re add the", "+ \")|(\".join(strings.get_teams()) + \")\" i = 0 while i < len(self._sub_list): if re.search(combined,", "is not correct for offset\", logger.DEBUG) _correct = False if (not isinstance(_minute, int))", "it's a time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1) j += 1 #Exception if", "'...', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\.', '..', self._sub_list[i], re.UNICODE) #remove leading and", "of the file if not force_language: _language = self._detect_language(self._path_to_sub) else: _language = force_language", "', self._sub_list[i], re.UNICODE) #Correct not regular expressions self._sub_list[i] = re.sub(r'\\? 
\\!', '?!', self._sub_list[i],", "_count_english > _count_french: logger.log(\"TidySub : INFO: Guessed language is English\", logger.DEBUG) return \"en\"", "i += 1 #French: Try to correct punctuation in the subtitles file with", "Guessed language is English\", logger.DEBUG) return \"en\" else: return \"undefined\" #Test Regex for", "range line format self._clean_time_range(i) #re add EOL self._sub_list[i] += '\\r\\n' i += 1", "if re.match('^[0-9]+$', self._sub_list[i]): #First line must always be 1 if i == 0:", "the subtitles file with regex def _clean_hi(self): i = 0 while i <", "offset into a timedelta _time_offset = timedelta(hours=_hour, minutes=_minute, seconds=_second, microseconds=(_ms*1000)) i = 0", "logger.DEBUG) _correct = False if (not isinstance(_minute, int)) or _minute < 0 or", "if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{1}$', string): string = string[0:9] + \"00\" + string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string):", "if re.match(\"^.* \\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\:', ':', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^", "and save in timedelta _time_start = datetime.strptime('01/01/10 ' + re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '',", "files\", logger.DEBUG) return self._team_list = list() self._sub_list = list() #Load the subtitles file", "punctuation in the subtitles file with regex def _clean_punctuation_en(self): i = 0 while", "(English)\", logger.DEBUG) self._clean_punctuation_en() #Clean the formatting before saving the subtitles self._clean_formatting() #Write file", "re.match(r'^.*\\[', self._sub_list[i], re.UNICODE) and not re.match(r'\\]', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\]', self._sub_list[i+1], re.UNICODE): self._sub_list[i]", "self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ': ', self._sub_list[i], re.UNICODE)", "self._sub_list[i], re.UNICODE) #Extract start time and save in timedelta _time_start = datetime.strptime('01/01/10 '", "class class TidySub: \"\"\"Load the subtitle, the file containing regex for removal and", "re.UNICODE) #remove brackets and content self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE) #remove brackets", "re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\.', '..', self._sub_list[i], re.UNICODE) #remove leading and trailing spaces", "#re add EOL self._sub_list[i] += '\\r\\n' i += 1 #Write the new SRT", "\")|(\".join(strings.get_teams()) + \")\" i = 0 while i < len(self._sub_list): if re.search(combined, self._sub_list[i],", ": INFO: Subtitles file saved\", logger.DEBUG) #Try to detect subtitles language def _detect_language(self,", "if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string = string[0:6] + \"0\" + string[6:len(string)] #correct ms if", "removeMusic: logger.log(\"TidySub : INFO: Removing lyrics\", logger.DEBUG) self._clean_music() #If Hi must be removed", "%H:%M:%S,%f') #Extract end time and save in timedelta _time_end = datetime.strptime('01/01/10 ' +", "removeMusic=False, correct_punctuation=False, force_language = \"\"): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file", "else: logger.log(\"TidySub : Formatting error : number line\", logger.DEBUG) i += j #Re", "= re.sub(r'\\? \\? \\?', '???', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. \\. 
\\.', '...',", "re.sub(r' \\.', '.', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\.[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '.", "__init__(self, path_to_sub): #Boolean to stock if file is loaded self._is_file_loaded = False #Path", "must be removed if removeHi: logger.log(\"TidySub : INFO: Removing HI\", logger.DEBUG) self._clean_hi() #If", "punctuation must be corrected if correct_punctuation: if _language == \"fr\": logger.log(\"TidySub : INFO:", "the EOL must be removed if removeEOL: for i in fileToRead: tempList.append(i.rstrip('\\n\\r')) else:", "= 0 i = 0 # Count the number of occurences of the", "re.sub(r'\\,(?!\\\")', ', ', self._sub_list[i], re.UNICODE) #Correct semi-colon if re.match(\"^.*[^ ]+\\;\",self._sub_list[i], re.UNICODE): self._sub_list[i] =", "+ \"0\" + string[3:len(string)] #correct seconds if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{1}\\,', string): string = string[0:6] +", "containing regex for removal and perform the cleaning and formatting actions\"\"\" def __init__(self,", "self._sub_list[i].lower(), re.UNICODE): _count_english += 1 i += 1 #Return the language which has", "if i == 0: self._sub_list[i] = str('1') count = 1 else: self._sub_list[i] =", "be removed if removeHi: logger.log(\"TidySub : INFO: Removing HI\", logger.DEBUG) self._clean_hi() #If punctuation", "in the subtitles file with regex def _clean_punctuation_fr(self): i = 0 while i", "0 while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and not re.match(r'^[0-9]+$', self._sub_list[i]):", "logger.log(\"TidySub : No subtitles file was loaded\", logger.DEBUG) return if re.match(\"^.+\\.[a-z]{2}\\.srt$\", path_to_sub.lower(), re.UNICODE):", "#Test numbers #Formatting of time def _clean_formatting(self): #Remove unwanted blank lines self._clean_blank_lines() #Remove", "fileToRead: tempList.append(i) fileToRead.close() return tempList #Write a file def _write_file(self, path_to_file, toWrite): if", "self._sub_list[i], re.UNICODE), '%d/%m/%y %H:%M:%S,%f') #Extract end time and save in timedelta _time_end =", "self._sub_list[i] = re.sub(r'\\. \\. \\.', '...', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. 
\\.', '..',", "_sign is not \"+\" and _sign is not \"-\": logger.log(\"TidySub : Bad sign", "= 0 # Count the number of occurences of the words for each", "i = 0 while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i]) and not", "self._sub_list[i] = re.sub(r'^\\-', '- ', self._sub_list[i], re.UNICODE) #Correct not regular expressions self._sub_list[i] =", "lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and not", "content self._sub_list[i] = re.sub(r'\\[[^)]*\\]', '', self._sub_list[i], re.UNICODE) #remove brackets split in two lines", "int)) or _second < 0 or _second >= 60: logger.log(\"TidySub : Second is", "from line def _clean_music(self): i = 0 while i < len(self._sub_list): if re.search(u'\\u266a',", "1 #Clean formatting #Remove blank lines #Test numbers #Formatting of time def _clean_formatting(self):", "+= 3 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is not \"\" and", "]+\\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\!', ' !', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^ ]+\",self._sub_list[i], re.UNICODE):", "and perform the cleaning and formatting actions\"\"\" def __init__(self, path_to_sub): #Boolean to stock", "_time_offset #create the new time range line self._sub_list[i] = str(_time_start.hour) + \":\" +", "+ string[9:len(string)] if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{2}$', string): string = string[0:9] + \"0\" + string[9:len(string)] return", "(not isinstance(_second, int)) or _second < 0 or _second >= 60: logger.log(\"TidySub :", "the second line #Check if it's a time range if re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i+1]): self._clean_time_range(i+1)", "self._sub_list[i] = re.sub(r'[ ]{2,}', ' ', self._sub_list[i], re.UNICODE) #Remove line with just a", "+ str(_time_end.microsecond/1000) #correct the time range line format self._clean_time_range(i) #re add EOL self._sub_list[i]", "re.sub(r'\\:', ' :', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)', ':", "number if re.match('^[0-9]+$', self._sub_list[i]): #First line must always be 1 if i ==", "def _detect_language(self, path_to_sub): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file was loaded\",", "French\", logger.DEBUG) return \"fr\" elif _count_english > _count_french: logger.log(\"TidySub : INFO: Guessed language", "continue #if 3rd line is not empty elif self._sub_list[i+3] == \"\" and not", "file if not force_language: _language = self._detect_language(self._path_to_sub) else: _language = force_language #If the", "self._sub_list[i] = re.sub(r'(?<=[A-Z]\\.)\\.(?!\\\")(?![A-Z]\\.)', '. 
', self._sub_list[i], re.UNICODE) #Correct question mark if re.match(\"^.+[^ ]+\\?\",self._sub_list[i],", "re.UNICODE): _count_french += 1 if re.search(_english, self._sub_list[i].lower(), re.UNICODE): _count_english += 1 i +=", "self._sub_list[i+4] is not \"\" and re.match('^[0-9]+$', self._sub_list[i+5]): j += 3 elif self._sub_list[i+3] is", "self._sub_list[i], re.UNICODE) #remove parentheses split in two lines if i < (len(self._sub_list) -", "unwanted blank lines in the subtitles file def _clean_blank_lines(self): #Remove a blank line", "re.UNICODE) #remove leading and trailing spaces self._sub_list[i] = re.sub(r'^[ \\t]+|[ \\t]+$', '', self._sub_list[i],", "def _clean_punctuation_en(self): i = 0 while i < len(self._sub_list): if not re.match(r'^[0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+', self._sub_list[i])", "if not force_language: _language = self._detect_language(self._path_to_sub) else: _language = force_language #If the team", "re.UNICODE) #Correct colon if re.match(\"^.*[^ ]+\\:\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:', ' :', self._sub_list[i],", "if blank if self._sub_list[len(self._sub_list)-1] == \"\": del self._sub_list[len(self._sub_list)-1] def _clean_time_format(self, string): if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{3}$',", "self._path_to_sub = path_to_sub else: logger.log(\"TidySub : TidySub only corrects .srt files\", logger.DEBUG) return", "self._sub_list[i+3] == \"\" and re.match('^[0-9]+$', self._sub_list[i+4]): j += 2 elif self._sub_list[i+3] is not", "format self._clean_time_range(i) #re add EOL self._sub_list[i] += '\\r\\n' i += 1 #Write the", "or sub is in mkv\", logger.DEBUG) return except: try: fileToRead = codecs.open(path_to_file, \"r\",", "and re.match(r'^.*\\{', self._sub_list[i], re.UNICODE) and not re.match(r'\\}', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\}', self._sub_list[i+1], re.UNICODE):", "team strings must be removed if removeTeam: logger.log(\"TidySub : INFO: Removing teams names\",", "if i < (len(self._sub_list) - 1) and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and not re.match(r'\\)',", "line if (i+1) == len(self._sub_list)-1: del self._sub_list[i+1] continue elif (i+2) == len(self._sub_list)-1: break", "self._is_file_loaded = True #If the EOL must be removed if removeEOL: for i", "was loaded\", logger.DEBUG) return _correct = True # Check consistency of the parameters", "_clean_formatting(self): #Remove unwanted blank lines self._clean_blank_lines() #Remove BOM character self._sub_list[0] = re.sub(u'\\ufeff', '',", "correct punctuation in the subtitles file with regex def _clean_punctuation_en(self): i = 0", "timedelta _time_start = datetime.strptime('01/01/10 ' + re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i], re.UNICODE), '%d/%m/%y", "3 elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is not \"\" and self._sub_list[i+5]", "Subtitles file loaded\", logger.DEBUG) return #Load a text file into a list in", "the time range line format self._clean_time_range(i) #re add EOL self._sub_list[i] += '\\r\\n' i", "if (not isinstance(_minute, int)) or _minute < 0 or _minute >= 60: logger.log(\"TidySub", "if _sign == \"+\": _time_start += _time_offset _time_end += _time_offset elif _sign ==", "if re.match(\"^.+ \\!\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r' \\!', '!', self._sub_list[i], re.UNICODE) if re.match(\"^.+\\![^", "\"utf-8\") except: logger.log(\"TidySub : File not encoded in UTF-8 neither in latin-1\", logger.DEBUG)", "del 
self._sub_list[len(self._sub_list)-1] break #Check the second line #Check if it's a time range", "< len(self._sub_list): j = 1 #If the line is a number if re.match('^[0-9]+$',", "codecs.open(path_to_file, \"r\", \"latin-1\") except IOError: logger.log(\"TidySub : File does not exist or sub", "must be removed if removeTeam: logger.log(\"TidySub : INFO: Removing teams names\", logger.DEBUG) #Call", "cleaning and formatting actions\"\"\" def __init__(self, path_to_sub): #Boolean to stock if file is", "i += 1 #Clean formatting #Remove blank lines #Test numbers #Formatting of time", "' ;', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\;[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\;', '; ',", "+= _time_offset _time_end += _time_offset elif _sign == \"-\": _time_start -= _time_offset _time_end", "+ \")|(\".join(strings.get_guess_english(),True) + \"))\" + \"([ ]|$)\" _count_french = 0 _count_english = 0", "\"\" and not re.match('^[0-9]+$', self._sub_list[i+5]): del self._sub_list[i+4] continue elif self._sub_list[i+3] is not \"\"", "in two lines if i < (len(self._sub_list) - 1) and re.match(r'^.*\\[', self._sub_list[i], re.UNICODE)", "re.match('^[0-9]+$', self._sub_list[i+6]): j += 4 count += 1 else: logger.log(\"TidySub : Formatting error", "#Return the language which has the highest count if _count_french > _count_english: logger.log(\"TidySub", "string = string[0:6] + \"0\" + string[6:len(string)] #correct ms if re.match(r'^[0-9]{2}\\:[0-9]{2}\\:[0-9]{2}\\,[0-9]{1}$', string): string", "self._sub_list[i] del self._sub_list[i] continue elif self._sub_list[i+2] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+3]): del", "a number if re.match('^[0-9]+$', self._sub_list[i]): #First line must always be 1 if i", "del self._sub_list[i+4] continue elif self._sub_list[i+3] is not \"\" and self._sub_list[i+4] is not \"\"", "re.UNICODE) i += 1 #French: Try to correct punctuation in the subtitles file", "self._clean_time_format(_end) #Main function to clean subtitles def Clean(self, removeHi=False, removeTeam=False, removeMusic=False, correct_punctuation=False, force_language", "logger.log(\"TidySub : TidySub only corrects .srt files\", logger.DEBUG) return self._team_list = list() self._sub_list", "file def _write_file(self, path_to_file, toWrite): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file", "single hyphen self._sub_list[i] = re.sub(r'^\\-$', '', self._sub_list[i], re.UNICODE) #delete empty balise self._sub_list[i] =", "'', self._sub_list[i+1], re.UNICODE) #remove name of speaker in front of the line self._sub_list[i]", "must be removed if removeEOL: for i in fileToRead: tempList.append(i.rstrip('\\n\\r')) else: for i", "= re.sub(r'\\:', ' :', self._sub_list[i], re.UNICODE) if re.match(\"^.*\\:[^ ]+\",self._sub_list[i], re.UNICODE): self._sub_list[i] = re.sub(r'\\:(?!\\\")(?![0-9]+)',", "return #Try to determine the language of the file if not force_language: _language", "is in mkv\", logger.DEBUG) return except: try: fileToRead = codecs.open(path_to_file, \"r\", \"utf-8\") except:", "the new time range line self._sub_list[i] = str(_time_start.hour) + \":\" + str(_time_start.minute) +", "re.sub(r'\\. \\. \\.', '...', self._sub_list[i], re.UNICODE) self._sub_list[i] = re.sub(r'\\. 
\\.', '..', self._sub_list[i], re.UNICODE)", "not \"\" and self._sub_list[i+4] == \"\" and not re.match('^[0-9]+$', self._sub_list[i+5]): del self._sub_list[i+4] continue", "else: return self._guess_language() def _guess_language(self): if not self._is_file_loaded: logger.log(\"TidySub : No subtitles file", "logger.DEBUG) self._clean_punctuation_en() #Clean the formatting before saving the subtitles self._clean_formatting() #Write file self._write_file(self._path_to_sub,", "self._sub_list) def Offset(self, _sign, _hour=0, _minute=0, _second=0, _ms=0): if not self._is_file_loaded: logger.log(\"TidySub :", "not None: logger.log(\"TidySub : INFO: Subtitles file loaded\", logger.DEBUG) return #Load a text", "return \"undefined\" #Test Regex for team words def _clean_team(self): #combine team names into", ": TidySub only corrects .srt files\", logger.DEBUG) return self._team_list = list() self._sub_list =", "python # -*- coding: utf-8 -*- import codecs import re from datetime import", "1) and re.match(r'^.*\\(', self._sub_list[i], re.UNICODE) and not re.match(r'\\)', self._sub_list[i], re.UNICODE) and re.match(r'^.*\\)', self._sub_list[i+1],", "for offset\", logger.DEBUG) _correct = False if (not isinstance(_ms, int)) or _ms <", "to correct punctuation in the subtitles file with regex def _clean_punctuation_fr(self): i =", "count = 1 else: self._sub_list[i] = str(count) #Exception if last line if i", "in timedelta _time_start = datetime.strptime('01/01/10 ' + re.sub(r' \\-\\-\\> [0-9]+\\:[0-9]+\\:[0-9]+\\,[0-9]+$', '', self._sub_list[i], re.UNICODE),", "else: logger.log(\"TidySub : Formatting error : timerange\", logger.DEBUG) else: logger.log(\"TidySub : Formatting error" ]
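The regex cleaning passes above are mechanical, but the two time-handling techniques are worth seeing in isolation. Below is a minimal standalone sketch of both; the function names pad_srt_time and shift_time_range are hypothetical (TidySub does this inside _clean_time_format and Offset, using per-case re.match branches and a '01/01/10' date anchor for strptime, where the sketch relies on strptime's default 1900-01-01 date, which serves the same purpose for timedelta arithmetic).

from datetime import datetime, timedelta

def pad_srt_time(ts: str) -> str:
    # Zero-pad a lax SRT timestamp such as '1:2:3,45' to '01:02:03,045',
    # matching _clean_time_format's left-padding of each field.
    hours, minutes, rest = ts.split(':')
    seconds, ms = rest.split(',')
    return f"{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d},{int(ms):03d}"

def shift_time_range(line: str, offset: timedelta, sign: str = '+') -> str:
    # Shift both ends of a 'start --> end' line, as Offset does for every cue.
    fmt = '%H:%M:%S,%f'
    start, end = (datetime.strptime(part, fmt) for part in line.split(' --> '))
    if sign == '+':
        start, end = start + offset, end + offset
    else:
        start, end = start - offset, end - offset
    # strftime('%f') emits microseconds; trim the last three digits to get ms.
    return f"{start.strftime(fmt)[:-3]} --> {end.strftime(fmt)[:-3]}"

print(pad_srt_time('1:2:3,45'))
# 01:02:03,045
print(shift_time_range('00:01:02,500 --> 00:01:04,000', timedelta(seconds=2)))
# 00:01:04,500 --> 00:01:06,000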
[Dataset row: n-gram shingles of a pystratis request model (repo TjadenFroyda/pyStratis). Reconstructed source:]

from pydantic import Field
from pystratis.api import Model
from pystratis.core.types import Address


# noinspection PyUnresolvedReferences
class PubKeyRequest(Model):
    """A request model used for /wallet/pubkey endpoint.

    Args:
        wallet_name (str): The name of the wallet to search for pubkey in.
        external_address (Address): The external address of a wanted pubkey.
    """
    wallet_name: str = Field(alias='walletName')
    external_address: Address = Field(alias='externalAddress')
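The only moving part in this model is pydantic's field aliasing: attribute names stay snake_case in Python while the node's JSON API expects camelCase keys. A minimal self-contained sketch of the same wiring, with two stated assumptions: a plain pydantic BaseModel stands in for pystratis.api.Model, and str stands in for pystratis.core.types.Address (the real Address validates Stratis address strings). Pydantic v1 style, matching the Field(alias=...) usage above.

from pydantic import BaseModel, Field

Address = str  # stand-in for pystratis.core.types.Address


class PubKeyRequestSketch(BaseModel):
    wallet_name: str = Field(alias='walletName')
    external_address: Address = Field(alias='externalAddress')


# Populate via the aliases (pydantic v1 accepts aliases as input by default)...
req = PubKeyRequestSketch(walletName='MyWallet', externalAddress='XExampleAddress')
# ...and serialize back to the camelCase shape the endpoint expects.
print(req.dict(by_alias=True))
# {'walletName': 'MyWallet', 'externalAddress': 'XExampleAddress'}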
[Dataset row: n-gram shingles of a grading-worker API client (no repo tag present in this dump; it imports from local client and settings modules). Reconstructed source:]

import json
import os
import requests
from client import Submission
from settings import API_BASE_URL, ACCESS_TOKEN


def get_task_url(task_id: int):
    return API_BASE_URL + f"/tasks/{task_id}/download_grader/"


def get_agent_url(submission_id: int):
    return API_BASE_URL + f"/submissions/{submission_id}/download/"


def start_job(job_id, task_id) -> Submission:
    worker_name = "unknown_worker"
    if os.getenv("WORKER_NAME") is not None:
        worker_name = os.getenv("WORKER_NAME")
    resp = requests.get(API_BASE_URL + f"/jobs/{job_id}/start_job/",
                        headers={"Authorization": f"Token {ACCESS_TOKEN}"},
                        data={
                            "worker_name": worker_name,
                            "task_id": task_id
                        })
    if resp.status_code != 200:
        raise Exception(resp.content)
    obj = json.loads(resp.content)
    return Submission(sid=obj["submission"],
                      task_url=get_task_url(obj["task"]),
                      agent_url=get_agent_url(obj["submission"]))


def submit_job(job_id, task_id, result):
    resp = requests.get(API_BASE_URL + f"/jobs/{job_id}/submit_job/",
                        headers={"Authorization": f"Token {ACCESS_TOKEN}"},
                        data={
                            "result": result,
                            "task_id": task_id
                        })
    return resp
worker_name = os.getenv(\"WORKER_NAME\") resp = requests.get(API_BASE_URL + f\"/jobs/{job_id}/start_job/\", headers={\"Authorization\": f\"Token {ACCESS_TOKEN}\"},", "Submission(sid=obj[\"submission\"], task_url=get_task_url(obj[\"task\"]), agent_url=get_agent_url(obj[\"submission\"])) def submit_job(job_id, task_id, result): resp = requests.get(API_BASE_URL + f\"/jobs/{job_id}/submit_job/\", headers={\"Authorization\":", "200: raise Exception(resp.content) obj = json.loads(resp.content) return Submission(sid=obj[\"submission\"], task_url=get_task_url(obj[\"task\"]), agent_url=get_agent_url(obj[\"submission\"])) def submit_job(job_id, task_id,", "json import os import requests from client import Submission from settings import API_BASE_URL,", "requests from client import Submission from settings import API_BASE_URL, ACCESS_TOKEN def get_task_url(task_id: int):", "API_BASE_URL + f\"/submissions/{submission_id}/download/\" def start_job(job_id, task_id) -> Submission: worker_name = \"unknown_worker\" if os.getenv(\"WORKER_NAME\")", "requests.get(API_BASE_URL + f\"/jobs/{job_id}/start_job/\", headers={\"Authorization\": f\"Token {ACCESS_TOKEN}\"}, data={ \"worker_name\": worker_name, \"task_id\": task_id }) if", "if resp.status_code != 200: raise Exception(resp.content) obj = json.loads(resp.content) return Submission(sid=obj[\"submission\"], task_url=get_task_url(obj[\"task\"]), agent_url=get_agent_url(obj[\"submission\"]))", "return API_BASE_URL + f\"/submissions/{submission_id}/download/\" def start_job(job_id, task_id) -> Submission: worker_name = \"unknown_worker\" if", "os import requests from client import Submission from settings import API_BASE_URL, ACCESS_TOKEN def", "return Submission(sid=obj[\"submission\"], task_url=get_task_url(obj[\"task\"]), agent_url=get_agent_url(obj[\"submission\"])) def submit_job(job_id, task_id, result): resp = requests.get(API_BASE_URL + f\"/jobs/{job_id}/submit_job/\",", "API_BASE_URL, ACCESS_TOKEN def get_task_url(task_id: int): return API_BASE_URL + f\"/tasks/{task_id}/download_grader/\" def get_agent_url(submission_id: int): return", "= os.getenv(\"WORKER_NAME\") resp = requests.get(API_BASE_URL + f\"/jobs/{job_id}/start_job/\", headers={\"Authorization\": f\"Token {ACCESS_TOKEN}\"}, data={ \"worker_name\": worker_name,", "\"unknown_worker\" if os.getenv(\"WORKER_NAME\") is not None: worker_name = os.getenv(\"WORKER_NAME\") resp = requests.get(API_BASE_URL +", "f\"/jobs/{job_id}/start_job/\", headers={\"Authorization\": f\"Token {ACCESS_TOKEN}\"}, data={ \"worker_name\": worker_name, \"task_id\": task_id }) if resp.status_code !=", "import API_BASE_URL, ACCESS_TOKEN def get_task_url(task_id: int): return API_BASE_URL + f\"/tasks/{task_id}/download_grader/\" def get_agent_url(submission_id: int):" ]
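A minimal driver sketch for the client above, assuming only the reconstructed start_job/submit_job signatures; JOB_ID, TASK_ID, and run_agent are hypothetical placeholders, not part of the source.

# Hypothetical worker loop around start_job/submit_job; JOB_ID, TASK_ID
# and run_agent() are illustrative placeholders, not part of the source.
import time

JOB_ID = 1
TASK_ID = 42

def run_agent(submission):
    # Placeholder: fetch submission.agent_url and submission.task_url,
    # run the agent against the grader, and return a result payload.
    return {"score": 0.0}

while True:
    try:
        submission = start_job(JOB_ID, TASK_ID)
        result = run_agent(submission)
        submit_job(JOB_ID, TASK_ID, result)
    except Exception:
        # start_job raises on any non-200 response; back off and retry.
        time.sleep(30)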
import time
import logging

from extended_networkx_tools import Analytics, AnalyticsGraph
from timeit import default_timer as timer

from utils import Solvers
from utils.GraphUtils import GraphUtils
from utils.ServerUtil import ServerUtil
from datetime import datetime


class GraphThread:
    @staticmethod
    def start_thread(base_url, client_name, thread_id, color=None, recalc=False):
        current_sleep = 10
        gt = GraphThread(base_url, client_name, thread_id, color)
        while True:
            try:
                gt.run(recalc)
                current_sleep = 10
            except Exception as e:
                logging.exception("Failed when running thread")
                gt.print('Crashed, restarting in %d seconds' % current_sleep, Styles.FAIL)
                time.sleep(current_sleep)
                current_sleep += 10

    client_name: str
    server: ServerUtil
    thread_id: int
    color: None

    def __init__(self, base_url, client_name, thread_id, color):
        self.client_name = client_name
        self.thread_id = thread_id
        self.server = ServerUtil(base_url)
        self.color = color

    def run(self, recalc=False):
        # Get a new task from the server
        task = self.get_task(recalc)
        self.print("(%d) Received graph (%d nodes), type %s"
                   % (task['Id'], task['NodeCount'], task['SolveType']))
        # Solve it and get a graph
        start = timer()
        analytics_graph, custom_data = self.solve_task(task=task)
        end = timer()
        # Calculate deltatime
        delta_time = end - start
        time_minutes = round((delta_time / 60) - 0.49)
        time_seconds = round(delta_time % 60)
        self.print("(%d) Solved graph (%d nodes) in %sm %ss"
                   % (task['Id'], task['NodeCount'], time_minutes, time_seconds))
        # Get the results
        results = GraphUtils.get_results(analytics_graph=analytics_graph,
                                         task=task, custom_data=custom_data)
        # Upload the results to the server
        self.upload_results(results=results, analytics_graph=analytics_graph)
        self.print("(%d) Uploaded results (%d nodes)" % (task['Id'], task['NodeCount']))

    def get_task(self, recalc=False):
        if recalc:
            task = self.server.get_recalc_task()
        else:
            task = self.server.get_task(self.client_name)
        return task

    @staticmethod
    def solve_task(task) -> (AnalyticsGraph, object):
        solve_type = task['SolveType']
        if solve_type == 'diff':
            return Solvers.Diff.solve(task)
        elif solve_type == 'spec':
            return Solvers.Spec.solve(task)
        elif solve_type == 'random':
            return Solvers.Random.solve(task)
        elif solve_type == 'field' or solve_type == 'dfield' \
                or solve_type == 'sfield' or solve_type == 'sfield_fr':
            return Solvers.Field.solve(task)
        else:
            return Solvers.Random.solve(task)

    def upload_results(self, results, analytics_graph: AnalyticsGraph):
        worker_id = results['Id']
        self.server.upload_results(worker_id, results)
        self.server.upload_results(worker_id, {'Nodes': Analytics.get_node_dict(analytics_graph.graph())})
        self.server.upload_results(worker_id, {'Edges': Analytics.get_edge_dict(analytics_graph.graph())})

    def print(self, msg, type=None):
        start_color = None
        if type is None:
            start_color = self.color
        ts = datetime.now().strftime('%H:%M:%S')
        print("%s%s%s %s P%d: %s%s"
              % (Styles.BOLD, ts, Styles.ENDC, start_color, self.thread_id, msg, Styles.ENDC))


class Styles:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
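A sketch of how GraphThread.start_thread might be fanned out over several OS threads; the base URL and thread count are assumptions for illustration, and the per-worker colors simply reuse the Styles escape codes defined above.

# Hypothetical launcher: one thread per worker, each running the
# crash-and-restart loop in GraphThread.start_thread. BASE_URL and
# THREADS are illustrative assumptions, not from the source.
import threading

BASE_URL = "http://localhost:8000"  # assumed server address
THREADS = 4

colors = [Styles.OKBLUE, Styles.OKGREEN, Styles.WARNING, Styles.HEADER]
workers = []
for i in range(THREADS):
    t = threading.Thread(
        target=GraphThread.start_thread,
        args=(BASE_URL, "worker-%d" % i, i, colors[i % len(colors)]),
        daemon=True,
    )
    t.start()
    workers.append(t)

for t in workers:
    t.join()  # start_thread never returns, so this blocks indefinitely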
from django import forms

SACCO_DRIVER_STATUS_OPTIONS = [
    ('Approved', ('Approved to operate')),
    ('Suspended', ('Suspended for the time being')),
    ('Blacklisted', ('Blacklisted from operating'))
]


class VehicleForm(forms.Form):
    # sacco = forms.CharField(label="Sacco", max_length=100)
    regno = forms.CharField(label="Registration Number", max_length=7)

    # def get_sacco(self):
    #     """Return the name of the sacco."""
    #     return self.sacco

    def get_regno(self):
        """Return the regno of the vehicle."""
        return self.regno


class DriverForm(forms.Form):
    """Viewset for add driver."""

    national_id = forms.CharField(max_length=8)
    # first_name = forms.CharField(max_length=10)
    # last_name = forms.CharField(max_length=10)
    #
    # sacco = forms.CharField(max_length=10)
    # email = forms.CharField(max_length=15)
    # phone_number = forms.CharField(max_length=12)


class UpdateSaccoDriverStatusForm(forms.Form):
    """Update the status of a sacco driver."""

    status = forms.CharField(
        widget=forms.Select(choices=SACCO_DRIVER_STATUS_OPTIONS)
    )
    description = forms.CharField(widget=forms.Textarea)


class SearchDriverIdForm(forms.Form):
    """Search for a driver."""

    national_id = forms.CharField(max_length=10, help_text="Enter driver id")
operate')), ('Suspended', ('Suspended for the", "return self.sacco def get_regno(self): \"\"\"Return the regno of the vehicle.\"\"\" return self.regno class" ]
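A usage illustration, not part of the original module: binding a data dict to one of the forms above and validating it follows the standard Django form cycle. The input value here is invented.

# Hypothetical usage inside a configured Django project.
data = {'national_id': '12345678'}
form = DriverForm(data)                 # bind the submitted data
if form.is_valid():                     # runs max_length and other field validators
    national_id = form.cleaned_data['national_id']
else:
    print(form.errors)                  # maps field name -> list of error messages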
[ "other.right is None elif other.left is None and other.right is None: return False", "Returns a list of all unique symmetrical trees with n internal nodes \"\"\"", "None: return False else: return self.left == other.left and self.right == other.right def", "return \"( )\" else: return \"( \" + str(self.left) + \" \" +", "of all unique trees with n internal nodes \"\"\" pass def allSymTrees(n): \"\"\"", "None, latter representing a leaf \"\"\" def __init__(self, left=None, right=None): super(Node, self).__init__() self.left", "other.left is None and other.right is None: return False else: return self.left ==", "all unique trees with n internal nodes \"\"\" pass def allSymTrees(n): \"\"\" Returns", "child, both may be a Node or both None, latter representing a leaf", "if self.left is None and self.right is None: return \"( )\" else: return", "self.right = right def __str__(self): \"\"\" Default inorder print \"\"\" if self.left is", "left and a right child, both may be a Node or both None,", "of all unique symmetrical trees with n internal nodes \"\"\" pass if __name__", "- a left and a right child, both may be a Node or", "be a Node or both None, latter representing a leaf \"\"\" def __init__(self,", "a list of all unique symmetrical trees with n internal nodes \"\"\" pass", "other.right def mirrorTree(node): \"\"\" Returns the mirror image of the tree rooted at", "def allTrees(n): \"\"\" Returns a list of all unique trees with n internal", "None elif other.left is None and other.right is None: return False else: return", "def mirrorTree(node): \"\"\" Returns the mirror image of the tree rooted at node", "objects - a left and a right child, both may be a Node", "self.left is None and self.right is None: return \"( )\" else: return \"(", "pass if __name__ == '__main__': for x in allSymTrees(int(input())): print(x) node = Node(Node(Node(),", "None and self.right is None: return other.left is None and other.right is None", "\"( \" + str(self.left) + \" \" + str(self.right) + \" )\" def", "None and other.right is None: return False else: return self.left == other.left and", "else: return self.left == other.left and self.right == other.right def mirrorTree(node): \"\"\" Returns", "None and other.right is None elif other.left is None and other.right is None:", "\"\"\" def __init__(self, left=None, right=None): super(Node, self).__init__() self.left = left self.right = right", "with n internal nodes \"\"\" pass def allSymTrees(n): \"\"\" Returns a list of", "__eq__(self, other): if self.left is None and self.right is None: return other.left is", "allTrees(n): \"\"\" Returns a list of all unique trees with n internal nodes", "left self.right = right def __str__(self): \"\"\" Default inorder print \"\"\" if self.left", "left=None, right=None): super(Node, self).__init__() self.left = left self.right = right def __str__(self): \"\"\"", "is None elif other.left is None and other.right is None: return False else:", "return False else: return self.left == other.left and self.right == other.right def mirrorTree(node):", "\"\"\" pass def allTrees(n): \"\"\" Returns a list of all unique trees with", "right child, both may be a Node or both None, latter representing a", "Node or both None, latter representing a leaf \"\"\" def __init__(self, left=None, right=None):", "self.right is None: return \"( )\" else: return \"( \" + str(self.left) +", "str(self.right) + \" )\" def __eq__(self, other): if self.left is None and self.right", "the mirror image of the tree rooted at node \"\"\" pass def allTrees(n):", 
"other.left and self.right == other.right def mirrorTree(node): \"\"\" Returns the mirror image of", "a list of all unique trees with n internal nodes \"\"\" pass def", "\"\"\" pass def allSymTrees(n): \"\"\" Returns a list of all unique symmetrical trees", "list of all unique trees with n internal nodes \"\"\" pass def allSymTrees(n):", "is None: return other.left is None and other.right is None elif other.left is", "latter representing a leaf \"\"\" def __init__(self, left=None, right=None): super(Node, self).__init__() self.left =", "return \"( \" + str(self.left) + \" \" + str(self.right) + \" )\"", "symmetrical trees with n internal nodes \"\"\" pass if __name__ == '__main__': for", "\"( )\" else: return \"( \" + str(self.left) + \" \" + str(self.right)", "__name__ == '__main__': for x in allSymTrees(int(input())): print(x) node = Node(Node(Node(), Node()), Node())", "+ \" \" + str(self.right) + \" )\" def __eq__(self, other): if self.left", "None: return other.left is None and other.right is None elif other.left is None", "\"\"\" if self.left is None and self.right is None: return \"( )\" else:", "\"\"\" Node contains two objects - a left and a right child, both", "+ \" )\" def __eq__(self, other): if self.left is None and self.right is", "super(Node, self).__init__() self.left = left self.right = right def __str__(self): \"\"\" Default inorder", "may be a Node or both None, latter representing a leaf \"\"\" def", "both None, latter representing a leaf \"\"\" def __init__(self, left=None, right=None): super(Node, self).__init__()", "with n internal nodes \"\"\" pass if __name__ == '__main__': for x in", "the tree rooted at node \"\"\" pass def allTrees(n): \"\"\" Returns a list", "def __init__(self, left=None, right=None): super(Node, self).__init__() self.left = left self.right = right def", "is None and self.right is None: return other.left is None and other.right is", "two objects - a left and a right child, both may be a", "inorder print \"\"\" if self.left is None and self.right is None: return \"(", "\" + str(self.right) + \" )\" def __eq__(self, other): if self.left is None", "def allSymTrees(n): \"\"\" Returns a list of all unique symmetrical trees with n", "Node contains two objects - a left and a right child, both may", "== other.right def mirrorTree(node): \"\"\" Returns the mirror image of the tree rooted", "a leaf \"\"\" def __init__(self, left=None, right=None): super(Node, self).__init__() self.left = left self.right", "= right def __str__(self): \"\"\" Default inorder print \"\"\" if self.left is None", "\"\"\" Returns a list of all unique trees with n internal nodes \"\"\"", "self.left == other.left and self.right == other.right def mirrorTree(node): \"\"\" Returns the mirror", "Default inorder print \"\"\" if self.left is None and self.right is None: return", "self.right == other.right def mirrorTree(node): \"\"\" Returns the mirror image of the tree", "str(self.left) + \" \" + str(self.right) + \" )\" def __eq__(self, other): if", "other.right is None: return False else: return self.left == other.left and self.right ==", "n internal nodes \"\"\" pass def allSymTrees(n): \"\"\" Returns a list of all", "of the tree rooted at node \"\"\" pass def allTrees(n): \"\"\" Returns a", "nodes \"\"\" pass def allSymTrees(n): \"\"\" Returns a list of all unique symmetrical", "__str__(self): \"\"\" Default inorder print \"\"\" if self.left is None and self.right is", "internal nodes \"\"\" pass if __name__ == '__main__': for x in allSymTrees(int(input())): print(x)", 
"<gh_stars>0 class Node(object): \"\"\" Node contains two objects - a left and a", "trees with n internal nodes \"\"\" pass if __name__ == '__main__': for x", "print \"\"\" if self.left is None and self.right is None: return \"( )\"", "pass def allSymTrees(n): \"\"\" Returns a list of all unique symmetrical trees with", "mirrorTree(node): \"\"\" Returns the mirror image of the tree rooted at node \"\"\"", "self.left = left self.right = right def __str__(self): \"\"\" Default inorder print \"\"\"", "\"\"\" Default inorder print \"\"\" if self.left is None and self.right is None:", "def __eq__(self, other): if self.left is None and self.right is None: return other.left", "elif other.left is None and other.right is None: return False else: return self.left", "if __name__ == '__main__': for x in allSymTrees(int(input())): print(x) node = Node(Node(Node(), Node()),", "\" + str(self.left) + \" \" + str(self.right) + \" )\" def __eq__(self,", "unique symmetrical trees with n internal nodes \"\"\" pass if __name__ == '__main__':", "and a right child, both may be a Node or both None, latter", "n internal nodes \"\"\" pass if __name__ == '__main__': for x in allSymTrees(int(input())):", "\" \" + str(self.right) + \" )\" def __eq__(self, other): if self.left is", ")\" def __eq__(self, other): if self.left is None and self.right is None: return", "and other.right is None elif other.left is None and other.right is None: return", "Returns a list of all unique trees with n internal nodes \"\"\" pass", "Returns the mirror image of the tree rooted at node \"\"\" pass def", "\"\"\" Returns the mirror image of the tree rooted at node \"\"\" pass", "contains two objects - a left and a right child, both may be", "both may be a Node or both None, latter representing a leaf \"\"\"", "== other.left and self.right == other.right def mirrorTree(node): \"\"\" Returns the mirror image", "class Node(object): \"\"\" Node contains two objects - a left and a right", "if self.left is None and self.right is None: return other.left is None and", "pass def allTrees(n): \"\"\" Returns a list of all unique trees with n", "nodes \"\"\" pass if __name__ == '__main__': for x in allSymTrees(int(input())): print(x) node", "leaf \"\"\" def __init__(self, left=None, right=None): super(Node, self).__init__() self.left = left self.right =", "False else: return self.left == other.left and self.right == other.right def mirrorTree(node): \"\"\"", "self.left is None and self.right is None: return other.left is None and other.right", "else: return \"( \" + str(self.left) + \" \" + str(self.right) + \"", "\"\"\" Returns a list of all unique symmetrical trees with n internal nodes", "tree rooted at node \"\"\" pass def allTrees(n): \"\"\" Returns a list of", "+ str(self.left) + \" \" + str(self.right) + \" )\" def __eq__(self, other):", "image of the tree rooted at node \"\"\" pass def allTrees(n): \"\"\" Returns", "and self.right is None: return other.left is None and other.right is None elif", "list of all unique symmetrical trees with n internal nodes \"\"\" pass if", "None: return \"( )\" else: return \"( \" + str(self.left) + \" \"", ")\" else: return \"( \" + str(self.left) + \" \" + str(self.right) +", "a left and a right child, both may be a Node or both", "internal nodes \"\"\" pass def allSymTrees(n): \"\"\" Returns a list of all unique", "a Node or both None, latter representing a leaf \"\"\" def __init__(self, left=None,", "right=None): super(Node, self).__init__() self.left = left self.right = right def __str__(self): \"\"\" 
Default", "representing a leaf \"\"\" def __init__(self, left=None, right=None): super(Node, self).__init__() self.left = left", "__init__(self, left=None, right=None): super(Node, self).__init__() self.left = left self.right = right def __str__(self):", "and self.right is None: return \"( )\" else: return \"( \" + str(self.left)", "unique trees with n internal nodes \"\"\" pass def allSymTrees(n): \"\"\" Returns a", "and self.right == other.right def mirrorTree(node): \"\"\" Returns the mirror image of the", "or both None, latter representing a leaf \"\"\" def __init__(self, left=None, right=None): super(Node,", "right def __str__(self): \"\"\" Default inorder print \"\"\" if self.left is None and", "Node(object): \"\"\" Node contains two objects - a left and a right child,", "is None and other.right is None elif other.left is None and other.right is", "self.right is None: return other.left is None and other.right is None elif other.left", "all unique symmetrical trees with n internal nodes \"\"\" pass if __name__ ==", "+ str(self.right) + \" )\" def __eq__(self, other): if self.left is None and", "return other.left is None and other.right is None elif other.left is None and", "other.left is None and other.right is None elif other.left is None and other.right", "self).__init__() self.left = left self.right = right def __str__(self): \"\"\" Default inorder print", "return self.left == other.left and self.right == other.right def mirrorTree(node): \"\"\" Returns the", "allSymTrees(n): \"\"\" Returns a list of all unique symmetrical trees with n internal", "rooted at node \"\"\" pass def allTrees(n): \"\"\" Returns a list of all", "at node \"\"\" pass def allTrees(n): \"\"\" Returns a list of all unique", "def __str__(self): \"\"\" Default inorder print \"\"\" if self.left is None and self.right", "None and self.right is None: return \"( )\" else: return \"( \" +", "is None: return \"( )\" else: return \"( \" + str(self.left) + \"", "is None and other.right is None: return False else: return self.left == other.left", "other): if self.left is None and self.right is None: return other.left is None", "mirror image of the tree rooted at node \"\"\" pass def allTrees(n): \"\"\"", "\" )\" def __eq__(self, other): if self.left is None and self.right is None:", "is None: return False else: return self.left == other.left and self.right == other.right", "node \"\"\" pass def allTrees(n): \"\"\" Returns a list of all unique trees", "\"\"\" pass if __name__ == '__main__': for x in allSymTrees(int(input())): print(x) node =", "and other.right is None: return False else: return self.left == other.left and self.right", "= left self.right = right def __str__(self): \"\"\" Default inorder print \"\"\" if", "is None and self.right is None: return \"( )\" else: return \"( \"", "== '__main__': for x in allSymTrees(int(input())): print(x) node = Node(Node(Node(), Node()), Node()) print(node)", "a right child, both may be a Node or both None, latter representing", "trees with n internal nodes \"\"\" pass def allSymTrees(n): \"\"\" Returns a list" ]
[ "surrounding_text() -> Optional[SurroundingText]: # TODO: If the voicemacs server is inactive, return nothing.", "import Context from user.emacs.utils.voicemacs import rpc_call from user.utils.formatting import SurroundingText context = Context()", "talon import Context from user.emacs.utils.voicemacs import rpc_call from user.utils.formatting import SurroundingText context =", "def surrounding_text() -> Optional[SurroundingText]: # TODO: If the voicemacs server is inactive, return", "Context() context.matches = r\"\"\" tag: user.emacs \"\"\" @context.action_class(\"self\") class UserActions: def surrounding_text() ->", "rpc_call from user.utils.formatting import SurroundingText context = Context() context.matches = r\"\"\" tag: user.emacs", "= r\"\"\" tag: user.emacs \"\"\" @context.action_class(\"self\") class UserActions: def surrounding_text() -> Optional[SurroundingText]: #", "If the voicemacs server is inactive, return nothing. raw_info = rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\",", "import Optional from talon import Context from user.emacs.utils.voicemacs import rpc_call from user.utils.formatting import", "\":chars-after\", 30000], # Use a very long timeout timeout=10, ) return SurroundingText( text_before=raw_info[\"text-before\"],", "from user.emacs.utils.voicemacs import rpc_call from user.utils.formatting import SurroundingText context = Context() context.matches =", "from typing import Optional from talon import Context from user.emacs.utils.voicemacs import rpc_call from", "from user.utils.formatting import SurroundingText context = Context() context.matches = r\"\"\" tag: user.emacs \"\"\"", "SurroundingText context = Context() context.matches = r\"\"\" tag: user.emacs \"\"\" @context.action_class(\"self\") class UserActions:", "user.utils.formatting import SurroundingText context = Context() context.matches = r\"\"\" tag: user.emacs \"\"\" @context.action_class(\"self\")", "-> Optional[SurroundingText]: # TODO: If the voicemacs server is inactive, return nothing. raw_info", "Optional[SurroundingText]: # TODO: If the voicemacs server is inactive, return nothing. raw_info =", "the voicemacs server is inactive, return nothing. raw_info = rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000,", "typing import Optional from talon import Context from user.emacs.utils.voicemacs import rpc_call from user.utils.formatting", "30000], # Use a very long timeout timeout=10, ) return SurroundingText( text_before=raw_info[\"text-before\"], text_after=raw_info[\"text-after\"]", "= Context() context.matches = r\"\"\" tag: user.emacs \"\"\" @context.action_class(\"self\") class UserActions: def surrounding_text()", "[\":chars-before\", 30000, \":chars-after\", 30000], # Use a very long timeout timeout=10, ) return", "raw_info = rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\", 30000], # Use a very long", "# Use a very long timeout timeout=10, ) return SurroundingText( text_before=raw_info[\"text-before\"], text_after=raw_info[\"text-after\"] )", "return nothing. 
raw_info = rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\", 30000], # Use a", "context = Context() context.matches = r\"\"\" tag: user.emacs \"\"\" @context.action_class(\"self\") class UserActions: def", "user.emacs \"\"\" @context.action_class(\"self\") class UserActions: def surrounding_text() -> Optional[SurroundingText]: # TODO: If the", "\"\"\" @context.action_class(\"self\") class UserActions: def surrounding_text() -> Optional[SurroundingText]: # TODO: If the voicemacs", "from talon import Context from user.emacs.utils.voicemacs import rpc_call from user.utils.formatting import SurroundingText context", "# TODO: If the voicemacs server is inactive, return nothing. raw_info = rpc_call(", "import SurroundingText context = Context() context.matches = r\"\"\" tag: user.emacs \"\"\" @context.action_class(\"self\") class", "server is inactive, return nothing. raw_info = rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\", 30000],", "Context from user.emacs.utils.voicemacs import rpc_call from user.utils.formatting import SurroundingText context = Context() context.matches", "is inactive, return nothing. raw_info = rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\", 30000], #", "inactive, return nothing. raw_info = rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\", 30000], # Use", "r\"\"\" tag: user.emacs \"\"\" @context.action_class(\"self\") class UserActions: def surrounding_text() -> Optional[SurroundingText]: # TODO:", "Optional from talon import Context from user.emacs.utils.voicemacs import rpc_call from user.utils.formatting import SurroundingText", "rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\", 30000], # Use a very long timeout timeout=10,", "\"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\", 30000], # Use a very long timeout timeout=10, )", "30000, \":chars-after\", 30000], # Use a very long timeout timeout=10, ) return SurroundingText(", "nothing. raw_info = rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\", 30000], # Use a very", "UserActions: def surrounding_text() -> Optional[SurroundingText]: # TODO: If the voicemacs server is inactive,", "tag: user.emacs \"\"\" @context.action_class(\"self\") class UserActions: def surrounding_text() -> Optional[SurroundingText]: # TODO: If", "voicemacs server is inactive, return nothing. raw_info = rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\",", "import rpc_call from user.utils.formatting import SurroundingText context = Context() context.matches = r\"\"\" tag:", "TODO: If the voicemacs server is inactive, return nothing. raw_info = rpc_call( \"voicemacs-surrounding-text\",", "= rpc_call( \"voicemacs-surrounding-text\", [\":chars-before\", 30000, \":chars-after\", 30000], # Use a very long timeout", "context.matches = r\"\"\" tag: user.emacs \"\"\" @context.action_class(\"self\") class UserActions: def surrounding_text() -> Optional[SurroundingText]:", "@context.action_class(\"self\") class UserActions: def surrounding_text() -> Optional[SurroundingText]: # TODO: If the voicemacs server", "user.emacs.utils.voicemacs import rpc_call from user.utils.formatting import SurroundingText context = Context() context.matches = r\"\"\"", "class UserActions: def surrounding_text() -> Optional[SurroundingText]: # TODO: If the voicemacs server is" ]
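The TODO above is unresolved in the source. One way the action could degrade gracefully is sketched below; it assumes `rpc_call` raises an exception when the voicemacs server is unreachable, which is an assumption about its behavior, not documented API, and it reuses the imports from the file above.

def surrounding_text_guarded() -> Optional[SurroundingText]:
    # Sketch only: assumes rpc_call raises when the server is inactive.
    try:
        raw_info = rpc_call(
            "voicemacs-surrounding-text",
            [":chars-before", 30000, ":chars-after", 30000],
            timeout=10,
        )
    except Exception:
        return None  # voicemacs inactive - report no surrounding text
    return SurroundingText(
        text_before=raw_info["text-before"], text_after=raw_info["text-after"]
    )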
[ "side of an assignment. global_x = 1 def f(): global_x += 1 ___assertRaises(UnboundLocalError,", "as a local and assigned # unbound, because it is the left side", "unbound, because it is the left side of an assignment. global_x = 1", "f() is lifted as a local and assigned # unbound, because it is", "variable in f() is lifted as a local and assigned # unbound, because", "<gh_stars>10-100 # the variable in f() is lifted as a local and assigned", "lifted as a local and assigned # unbound, because it is the left", "# unbound, because it is the left side of an assignment. global_x =", "it is the left side of an assignment. global_x = 1 def f():", "because it is the left side of an assignment. global_x = 1 def", "is lifted as a local and assigned # unbound, because it is the", "left side of an assignment. global_x = 1 def f(): global_x += 1", "the variable in f() is lifted as a local and assigned # unbound,", "assigned # unbound, because it is the left side of an assignment. global_x", "is the left side of an assignment. global_x = 1 def f(): global_x", "and assigned # unbound, because it is the left side of an assignment.", "the left side of an assignment. global_x = 1 def f(): global_x +=", "# the variable in f() is lifted as a local and assigned #", "a local and assigned # unbound, because it is the left side of", "in f() is lifted as a local and assigned # unbound, because it", "local and assigned # unbound, because it is the left side of an", "of an assignment. global_x = 1 def f(): global_x += 1 ___assertRaises(UnboundLocalError, f)" ]
[ "4.855094), '1': (52.378281, 4.90007), '3': (52.375737, 4.896547), '2': (52.373634, 4.890289), '5': (52.376237, 4.90286),", ">= 0: # res += str(d['id']) + ' ' # else: # break", "addresses.append(address) m = int(input_file.readline()) for i in xrange(m): line = input_file.readline() lines =", "' # else: # break for i in xrange(len(distances) - 1): j =", "6371 # in km TRANSPORTS = { 'metro': 20, 'bike': 15, 'foot': 5", "== '__main__': arg = sys.argv[-1] # get input file name addresses, reqs =", "= degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians)", "to 0.2f if nearest > distance or (nearest == distance and int(near_id) >", "as input_file: n = int(input_file.readline()) for i in xrange(n): line = input_file.readline() lines", "point1_long_in_radians)) * EARTH_RADIUS) def degree2radians(degree): return float(degree * 2 * PI / 360)", "break if not req_addresses: break print res.strip() def nearby_attractions(addrs, reqs): for req in", "str(distances[j]['id']) + ' ' else: break print res.strip() def distance_between(point1, point2): point1_lat_in_radians =", "= round(distance_between(req['location'], addr['location']), 2) # round to 0.2f distances.append(addr) # addrs with distances", "distances[i], distances[j], d, time, total_time if total_time >= 0: res += str(distances[j]['id']) +", "' else: break if not req_addresses: break print res.strip() def nearby_attractions(addrs, reqs): for", "0.2f if nearest > distance or (nearest == distance and int(near_id) > int(addr_id)):", "total_time -= time print distances[i], distances[j], d, time, total_time if total_time >= 0:", "req['transport'] req_time = req['time'] req_addresses = {} for addr in addresses: req_addresses[addr['id']] =", "reqs): for req in reqs: req_point = req['location'] req_trans = req['transport'] req_time =", "distance = round(distance_between(req_point, addr_point), 2) # round to 0.2f if nearest > distance", "float(nearest / TRANSPORTS[req_trans] * 60) req_time -= time req_point = req_addresses.pop(near_id) # remove", "-= time req_point = req_addresses.pop(near_id) # remove point travelled print req_addresses, '----->', near_id,", "line = input_file.readline() lines = line.split() address = {'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))}", "0: res += str(distances[j]['id']) + ' ' else: break print res.strip() def distance_between(point1,", "break print res.strip() def distance_between(point1, point2): point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians", "i + 1 d = round(distance_between(distances[j]['location'], distances[i]['location']), 2) time = d / TRANSPORTS[req['transport']]", "str(d['id']) + ' ' # else: # break for i in xrange(len(distances) -", "near_id = addr_id time = float(nearest / TRANSPORTS[req_trans] * 60) req_time -= time", "in xrange(m): line = input_file.readline() lines = line.split() req = {'location': (float(lines[0]), float(lines[1])),", "else: break if not req_addresses: break print res.strip() def nearby_attractions(addrs, reqs): for req", "str(near_id) + ' ' else: break if not req_addresses: break print res.strip() def", "time print distances[i], distances[j], d, time, total_time if total_time >= 0: res +=", "math PI = 3.14159265359 EARTH_RADIUS = 6371 # in km TRANSPORTS = {", "* 2 * PI / 360) if __name__ == '__main__': arg = 
sys.argv[-1]", "def nearby_attractions(addrs, reqs): for req in reqs: distances = [] for addr in", "d / TRANSPORTS[req['transport']] * 60 # minutes total_time -= time print distances[i], distances[j],", "in req_addresses: addr_point = req_addresses[addr_id] distance = round(distance_between(req_point, addr_point), 2) # round to", "d = round(distance_between(distances[j]['location'], distances[i]['location']), 2) time = d / TRANSPORTS[req['transport']] * 60 #", "degree2radians(degree): return float(degree * 2 * PI / 360) if __name__ == '__main__':", "travelled print req_addresses, '----->', near_id, req_point, nearest, req_time, time if req_time >= 0:", "res.strip() def distance_between(point1, point2): point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0])", "= round(distance_between(distances[j]['location'], distances[i]['location']), 2) time = d / TRANSPORTS[req['transport']] * 60 # minutes", "= input_file.readline() lines = line.split() req = {'location': (float(lines[0]), float(lines[1])), 'transport': lines[2], 'time':", "4.892835), '14': (52.368832, 4.892744), '35': (52.342497, 4.855094), '1': (52.378281, 4.90007), '3': (52.375737, 4.896547),", "> int(addr_id)): nearest = distance near_id = addr_id time = float(nearest / TRANSPORTS[req_trans]", "i in xrange(len(distances) - 1): j = i + 1 d = round(distance_between(distances[j]['location'],", "res += str(near_id) + ' ' else: break if not req_addresses: break print", "print distances[i], distances[j], d, time, total_time if total_time >= 0: res += str(distances[j]['id'])", "= i + 1 d = round(distance_between(distances[j]['location'], distances[i]['location']), 2) time = d /", "req_addresses[addr_id] distance = round(distance_between(req_point, addr_point), 2) # round to 0.2f if nearest >", "req['time'] req_addresses = {} for addr in addresses: req_addresses[addr['id']] = addr['location'] res =", "+= str(d['id']) + ' ' # else: # break for i in xrange(len(distances)", "'7': (52.366537, 4.911348), '6': (52.367066, 4.893381)} ''' def nearby_attraction(addresses, reqs): for req in", "# total_time -= time # if total_time >= 0: # res += str(d['id'])", "reqs: req_point = req['location'] req_trans = req['transport'] req_time = req['time'] req_addresses = {}", "- 1): j = i + 1 d = round(distance_between(distances[j]['location'], distances[i]['location']), 2) time", "'location': (float(lines[1]), float(lines[2]))} addresses.append(address) m = int(input_file.readline()) for i in xrange(m): line =", "nearest = distance near_id = addr_id time = float(nearest / TRANSPORTS[req_trans] * 60)", "# minutes # total_time -= time # if total_time >= 0: # res", "print res.strip() def distance_between(point1, point2): point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians =", ">= 0: res += str(distances[j]['id']) + ' ' else: break print res.strip() def", "near_id, req_point, nearest, req_time, time if req_time >= 0: res += str(near_id) +", "/ TRANSPORTS[req['transport']] * 60 # minutes total_time -= time print distances[i], distances[j], d,", "* math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS) def", "2) # round to 0.2f if nearest > distance or (nearest == distance", "'2': (52.373634, 4.890289), '5': (52.376237, 4.90286), '4': (52.372995, 4.893096), '7': (52.366537, 
4.911348), '6':", "(52.367066, 4.893381)} ''' def nearby_attraction(addresses, reqs): for req in reqs: req_point = req['location']", "return float(degree * 2 * PI / 360) if __name__ == '__main__': arg", "get input file name addresses, reqs = [], [] with open(arg, 'r') as", "(52.366537, 4.911348), '6': (52.367066, 4.893381)} ''' def nearby_attraction(addresses, reqs): for req in reqs:", "True: nearest = 10000000 # INT MAX near_id = '' for addr_id in", "to 0.2f distances.append(addr) # addrs with distances distances.append({'id': 0, 'location': req['location'], 'distance': 0.00})", "} ''' {'15': (52.357895, 4.892835), '14': (52.368832, 4.892744), '35': (52.342497, 4.855094), '1': (52.378281,", "+ ' ' else: break print res.strip() def distance_between(point1, point2): point1_lat_in_radians = degree2radians(point1[0])", "MAX near_id = '' for addr_id in req_addresses: addr_point = req_addresses[addr_id] distance =", "= 10000000 # INT MAX near_id = '' for addr_id in req_addresses: addr_point", "round(distance_between(distances[j]['location'], distances[i]['location']), 2) time = d / TRANSPORTS[req['transport']] * 60 # minutes total_time", "file name addresses, reqs = [], [] with open(arg, 'r') as input_file: n", "for i in xrange(m): line = input_file.readline() lines = line.split() req = {'location':", "15, 'foot': 5 } ''' {'15': (52.357895, 4.892835), '14': (52.368832, 4.892744), '35': (52.342497,", "== distance and int(near_id) > int(addr_id)): nearest = distance near_id = addr_id time", "= d['distance'] / TRANSPORTS[req['transport']] * 60 # minutes # total_time -= time #", "degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians)", "0.2f distances.append(addr) # addrs with distances distances.append({'id': 0, 'location': req['location'], 'distance': 0.00}) distances.sort(key=lambda", "math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS) def degree2radians(degree): return float(degree * 2 * PI", "4.893096), '7': (52.366537, 4.911348), '6': (52.367066, 4.893381)} ''' def nearby_attraction(addresses, reqs): for req", "= req_addresses.pop(near_id) # remove point travelled print req_addresses, '----->', near_id, req_point, nearest, req_time,", "addr['location'] res = '' while True: nearest = 10000000 # INT MAX near_id", "sys.argv[-1] # get input file name addresses, reqs = [], [] with open(arg,", "# -*- coding: utf-8 -*- __author__ = 'tanchao' import sys import math PI", "# print distances total_time, res = req['time'], '' # for d in distances:", "for i in xrange(len(distances) - 1): j = i + 1 d =", "def distance_between(point1, point2): point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians", "import sys import math PI = 3.14159265359 EARTH_RADIUS = 6371 # in km", "''' def nearby_attraction(addresses, reqs): for req in reqs: req_point = req['location'] req_trans =", "(nearest == distance and int(near_id) > int(addr_id)): nearest = distance near_id = addr_id", "' ' else: break if not req_addresses: break print res.strip() def nearby_attractions(addrs, reqs):", "EARTH_RADIUS = 6371 # in km TRANSPORTS = { 'metro': 20, 'bike': 15,", "+= str(near_id) + ' ' else: break if not req_addresses: break print res.strip()", "if total_time >= 0: res += str(distances[j]['id']) + ' ' 
else: break print", "with open(arg, 'r') as input_file: n = int(input_file.readline()) for i in xrange(n): line", "total_time -= time # if total_time >= 0: # res += str(d['id']) +", "# time = d['distance'] / TRANSPORTS[req['transport']] * 60 # minutes # total_time -=", "(52.378281, 4.90007), '3': (52.375737, 4.896547), '2': (52.373634, 4.890289), '5': (52.376237, 4.90286), '4': (52.372995,", "addresses, reqs = [], [] with open(arg, 'r') as input_file: n = int(input_file.readline())", "with distances distances.append({'id': 0, 'location': req['location'], 'distance': 0.00}) distances.sort(key=lambda distance: (distance['distance'], distance['id'])) #", "'' for addr_id in req_addresses: addr_point = req_addresses[addr_id] distance = round(distance_between(req_point, addr_point), 2)", "total_time >= 0: res += str(distances[j]['id']) + ' ' else: break print res.strip()", "id # print distances total_time, res = req['time'], '' # for d in", "3.14159265359 EARTH_RADIUS = 6371 # in km TRANSPORTS = { 'metro': 20, 'bike':", "+ 1 d = round(distance_between(distances[j]['location'], distances[i]['location']), 2) time = d / TRANSPORTS[req['transport']] *", "4.911348), '6': (52.367066, 4.893381)} ''' def nearby_attraction(addresses, reqs): for req in reqs: req_point", "for addr_id in req_addresses: addr_point = req_addresses[addr_id] distance = round(distance_between(req_point, addr_point), 2) #", "# round to 0.2f distances.append(addr) # addrs with distances distances.append({'id': 0, 'location': req['location'],", "for addr in addrs: addr['distance'] = round(distance_between(req['location'], addr['location']), 2) # round to 0.2f", "req_point = req_addresses.pop(near_id) # remove point travelled print req_addresses, '----->', near_id, req_point, nearest,", "round(distance_between(req_point, addr_point), 2) # round to 0.2f if nearest > distance or (nearest", "point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians)", "float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS)", "1): j = i + 1 d = round(distance_between(distances[j]['location'], distances[i]['location']), 2) time =", "# get input file name addresses, reqs = [], [] with open(arg, 'r')", "for req in reqs: distances = [] for addr in addrs: addr['distance'] =", "address = {'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))} addresses.append(address) m = int(input_file.readline()) for i", "= req['location'] req_trans = req['transport'] req_time = req['time'] req_addresses = {} for addr", "for d in distances: # time = d['distance'] / TRANSPORTS[req['transport']] * 60 #", "= sys.argv[-1] # get input file name addresses, reqs = [], [] with", "= distance near_id = addr_id time = float(nearest / TRANSPORTS[req_trans] * 60) req_time", "addrs with distances distances.append({'id': 0, 'location': req['location'], 'distance': 0.00}) distances.sort(key=lambda distance: (distance['distance'], distance['id']))", "= req_addresses[addr_id] distance = round(distance_between(req_point, addr_point), 2) # round to 0.2f if nearest", "input_file.readline() lines = line.split() req = {'location': (float(lines[0]), float(lines[1])), 'transport': lines[2], 'time': int(lines[3])}", "2) # round to 0.2f 
distances.append(addr) # addrs with distances distances.append({'id': 0, 'location':", "# for d in distances: # time = d['distance'] / TRANSPORTS[req['transport']] * 60", "d in distances: # time = d['distance'] / TRANSPORTS[req['transport']] * 60 # minutes", "point2): point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1])", "req in reqs: distances = [] for addr in addrs: addr['distance'] = round(distance_between(req['location'],", "distances total_time, res = req['time'], '' # for d in distances: # time", "# minutes total_time -= time print distances[i], distances[j], d, time, total_time if total_time", "if __name__ == '__main__': arg = sys.argv[-1] # get input file name addresses,", "/ TRANSPORTS[req_trans] * 60) req_time -= time req_point = req_addresses.pop(near_id) # remove point", "= input_file.readline() lines = line.split() address = {'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))} addresses.append(address)", "* math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS) def degree2radians(degree): return float(degree *", "(52.375737, 4.896547), '2': (52.373634, 4.890289), '5': (52.376237, 4.90286), '4': (52.372995, 4.893096), '7': (52.366537,", "TRANSPORTS[req['transport']] * 60 # minutes # total_time -= time # if total_time >=", "by id # print distances total_time, res = req['time'], '' # for d", "int(input_file.readline()) for i in xrange(n): line = input_file.readline() lines = line.split() address =", "req_time, time if req_time >= 0: res += str(near_id) + ' ' else:", "else: break print res.strip() def distance_between(point1, point2): point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1])", "input file name addresses, reqs = [], [] with open(arg, 'r') as input_file:", "'3': (52.375737, 4.896547), '2': (52.373634, 4.890289), '5': (52.376237, 4.90286), '4': (52.372995, 4.893096), '7':", "+= str(distances[j]['id']) + ' ' else: break print res.strip() def distance_between(point1, point2): point1_lat_in_radians", "distances distances.append({'id': 0, 'location': req['location'], 'distance': 0.00}) distances.sort(key=lambda distance: (distance['distance'], distance['id'])) # sort", "nearby_attraction(addresses, reqs): for req in reqs: req_point = req['location'] req_trans = req['transport'] req_time", "+ ' ' # else: # break for i in xrange(len(distances) - 1):", "line.split() address = {'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))} addresses.append(address) m = int(input_file.readline()) for", "in reqs: req_point = req['location'] req_trans = req['transport'] req_time = req['time'] req_addresses =", "round to 0.2f distances.append(addr) # addrs with distances distances.append({'id': 0, 'location': req['location'], 'distance':", "lines = line.split() address = {'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))} addresses.append(address) m =", "addr in addresses: req_addresses[addr['id']] = addr['location'] res = '' while True: nearest =", "res += str(d['id']) + ' ' # else: # break for i in", "2) time = d / TRANSPORTS[req['transport']] * 60 # minutes total_time -= time", "''' {'15': (52.357895, 4.892835), '14': (52.368832, 4.892744), '35': (52.342497, 4.855094), '1': (52.378281, 4.90007),", "2 * PI / 360) if __name__ == '__main__': arg = sys.argv[-1] #", "= '' for addr_id in 
req_addresses: addr_point = req_addresses[addr_id] distance = round(distance_between(req_point, addr_point),", "sys import math PI = 3.14159265359 EARTH_RADIUS = 6371 # in km TRANSPORTS", "INT MAX near_id = '' for addr_id in req_addresses: addr_point = req_addresses[addr_id] distance", "utf-8 -*- __author__ = 'tanchao' import sys import math PI = 3.14159265359 EARTH_RADIUS", "'35': (52.342497, 4.855094), '1': (52.378281, 4.90007), '3': (52.375737, 4.896547), '2': (52.373634, 4.890289), '5':", "req_addresses = {} for addr in addresses: req_addresses[addr['id']] = addr['location'] res = ''", "distance or (nearest == distance and int(near_id) > int(addr_id)): nearest = distance near_id", "time req_point = req_addresses.pop(near_id) # remove point travelled print req_addresses, '----->', near_id, req_point,", "-*- __author__ = 'tanchao' import sys import math PI = 3.14159265359 EARTH_RADIUS =", "# round to 0.2f if nearest > distance or (nearest == distance and", "coding: utf-8 -*- __author__ = 'tanchao' import sys import math PI = 3.14159265359", "= degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians -", "= addr['location'] res = '' while True: nearest = 10000000 # INT MAX", "= 6371 # in km TRANSPORTS = { 'metro': 20, 'bike': 15, 'foot':", "# break for i in xrange(len(distances) - 1): j = i + 1", "xrange(len(distances) - 1): j = i + 1 d = round(distance_between(distances[j]['location'], distances[i]['location']), 2)", "km TRANSPORTS = { 'metro': 20, 'bike': 15, 'foot': 5 } ''' {'15':", "reqs): for req in reqs: distances = [] for addr in addrs: addr['distance']", "addr_point), 2) # round to 0.2f if nearest > distance or (nearest ==", "remove point travelled print req_addresses, '----->', near_id, req_point, nearest, req_time, time if req_time", "addrs: addr['distance'] = round(distance_between(req['location'], addr['location']), 2) # round to 0.2f distances.append(addr) # addrs", "# sort by id # print distances total_time, res = req['time'], '' #", "4.890289), '5': (52.376237, 4.90286), '4': (52.372995, 4.893096), '7': (52.366537, 4.911348), '6': (52.367066, 4.893381)}", "def degree2radians(degree): return float(degree * 2 * PI / 360) if __name__ ==", "20, 'bike': 15, 'foot': 5 } ''' {'15': (52.357895, 4.892835), '14': (52.368832, 4.892744),", "__author__ = 'tanchao' import sys import math PI = 3.14159265359 EARTH_RADIUS = 6371", "/ 360) if __name__ == '__main__': arg = sys.argv[-1] # get input file", "' ' # else: # break for i in xrange(len(distances) - 1): j", "distances[j], d, time, total_time if total_time >= 0: res += str(distances[j]['id']) + '", "TRANSPORTS[req['transport']] * 60 # minutes total_time -= time print distances[i], distances[j], d, time,", "'foot': 5 } ''' {'15': (52.357895, 4.892835), '14': (52.368832, 4.892744), '35': (52.342497, 4.855094),", "[] with open(arg, 'r') as input_file: n = int(input_file.readline()) for i in xrange(n):", "nearest, req_time, time if req_time >= 0: res += str(near_id) + ' '", "open(arg, 'r') as input_file: n = int(input_file.readline()) for i in xrange(n): line =", "degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians - point1_long_in_radians))", "= req['time'] req_addresses = {} for addr in addresses: 
req_addresses[addr['id']] = addr['location'] res", "' ' else: break print res.strip() def distance_between(point1, point2): point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians", ">= 0: res += str(near_id) + ' ' else: break if not req_addresses:", "math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS) def degree2radians(degree): return float(degree * 2", "addr in addrs: addr['distance'] = round(distance_between(req['location'], addr['location']), 2) # round to 0.2f distances.append(addr)", "'1': (52.378281, 4.90007), '3': (52.375737, 4.896547), '2': (52.373634, 4.890289), '5': (52.376237, 4.90286), '4':", "* 60 # minutes # total_time -= time # if total_time >= 0:", "* math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS) def degree2radians(degree): return float(degree * 2 *", "in km TRANSPORTS = { 'metro': 20, 'bike': 15, 'foot': 5 } '''", "in xrange(n): line = input_file.readline() lines = line.split() address = {'id': int(lines[0]), 'location':", "4.892744), '35': (52.342497, 4.855094), '1': (52.378281, 4.90007), '3': (52.375737, 4.896547), '2': (52.373634, 4.890289),", "in distances: # time = d['distance'] / TRANSPORTS[req['transport']] * 60 # minutes #", "= [] for addr in addrs: addr['distance'] = round(distance_between(req['location'], addr['location']), 2) # round", "'----->', near_id, req_point, nearest, req_time, time if req_time >= 0: res += str(near_id)", "for req in reqs: req_point = req['location'] req_trans = req['transport'] req_time = req['time']", "= int(input_file.readline()) for i in xrange(m): line = input_file.readline() lines = line.split() req", "+ math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS) def degree2radians(degree): return", "d, time, total_time if total_time >= 0: res += str(distances[j]['id']) + ' '", "0: res += str(near_id) + ' ' else: break if not req_addresses: break", "(52.372995, 4.893096), '7': (52.366537, 4.911348), '6': (52.367066, 4.893381)} ''' def nearby_attraction(addresses, reqs): for", "/ TRANSPORTS[req['transport']] * 60 # minutes # total_time -= time # if total_time", "round(distance_between(req['location'], addr['location']), 2) # round to 0.2f distances.append(addr) # addrs with distances distances.append({'id':", "[], [] with open(arg, 'r') as input_file: n = int(input_file.readline()) for i in", "in reqs: distances = [] for addr in addrs: addr['distance'] = round(distance_between(req['location'], addr['location']),", "distance and int(near_id) > int(addr_id)): nearest = distance near_id = addr_id time =", "= 3.14159265359 EARTH_RADIUS = 6371 # in km TRANSPORTS = { 'metro': 20,", "# in km TRANSPORTS = { 'metro': 20, 'bike': 15, 'foot': 5 }", "for addr in addresses: req_addresses[addr['id']] = addr['location'] res = '' while True: nearest", "{} for addr in addresses: req_addresses[addr['id']] = addr['location'] res = '' while True:", "break print res.strip() def nearby_attractions(addrs, reqs): for req in reqs: distances = []", "= [], [] with open(arg, 'r') as input_file: n = int(input_file.readline()) for i", "or (nearest == distance and int(near_id) > int(addr_id)): nearest = distance near_id =", "= { 'metro': 20, 'bike': 15, 'foot': 5 } ''' {'15': (52.357895, 4.892835),", "degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) + 
math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) *", "line = input_file.readline() lines = line.split() req = {'location': (float(lines[0]), float(lines[1])), 'transport': lines[2],", "'14': (52.368832, 4.892744), '35': (52.342497, 4.855094), '1': (52.378281, 4.90007), '3': (52.375737, 4.896547), '2':", "req in reqs: req_point = req['location'] req_trans = req['transport'] req_time = req['time'] req_addresses", "math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS) def degree2radians(degree):", "i in xrange(m): line = input_file.readline() lines = line.split() req = {'location': (float(lines[0]),", "point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians) *", "EARTH_RADIUS) def degree2radians(degree): return float(degree * 2 * PI / 360) if __name__", "n = int(input_file.readline()) for i in xrange(n): line = input_file.readline() lines = line.split()", "req_time >= 0: res += str(near_id) + ' ' else: break if not", "(52.368832, 4.892744), '35': (52.342497, 4.855094), '1': (52.378281, 4.90007), '3': (52.375737, 4.896547), '2': (52.373634,", "if req_time >= 0: res += str(near_id) + ' ' else: break if", "in xrange(len(distances) - 1): j = i + 1 d = round(distance_between(distances[j]['location'], distances[i]['location']),", "PI / 360) if __name__ == '__main__': arg = sys.argv[-1] # get input", "j = i + 1 d = round(distance_between(distances[j]['location'], distances[i]['location']), 2) time = d", "req_point, nearest, req_time, time if req_time >= 0: res += str(near_id) + '", "distances.append(addr) # addrs with distances distances.append({'id': 0, 'location': req['location'], 'distance': 0.00}) distances.sort(key=lambda distance:", "'4': (52.372995, 4.893096), '7': (52.366537, 4.911348), '6': (52.367066, 4.893381)} ''' def nearby_attraction(addresses, reqs):", "req_time -= time req_point = req_addresses.pop(near_id) # remove point travelled print req_addresses, '----->',", "break for i in xrange(len(distances) - 1): j = i + 1 d", "0: # res += str(d['id']) + ' ' # else: # break for", "distance_between(point1, point2): point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians =", "(distance['distance'], distance['id'])) # sort by id # print distances total_time, res = req['time'],", "req_addresses, '----->', near_id, req_point, nearest, req_time, time if req_time >= 0: res +=", "= {'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))} addresses.append(address) m = int(input_file.readline()) for i in", "(52.373634, 4.890289), '5': (52.376237, 4.90286), '4': (52.372995, 4.893096), '7': (52.366537, 4.911348), '6': (52.367066,", "time, total_time if total_time >= 0: res += str(distances[j]['id']) + ' ' else:", "[] for addr in addrs: addr['distance'] = round(distance_between(req['location'], addr['location']), 2) # round to", "4.896547), '2': (52.373634, 4.890289), '5': (52.376237, 4.90286), '4': (52.372995, 4.893096), '7': (52.366537, 4.911348),", "arg = sys.argv[-1] # get input file name addresses, reqs = [], []", "m = int(input_file.readline()) for i in xrange(m): line = input_file.readline() lines = line.split()", "import math PI = 3.14159265359 EARTH_RADIUS = 6371 
# in km TRANSPORTS =", "# if total_time >= 0: # res += str(d['id']) + ' ' #", "#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'tanchao' import sys import", "distance near_id = addr_id time = float(nearest / TRANSPORTS[req_trans] * 60) req_time -=", "60) req_time -= time req_point = req_addresses.pop(near_id) # remove point travelled print req_addresses,", "distances: # time = d['distance'] / TRANSPORTS[req['transport']] * 60 # minutes # total_time", "total_time, res = req['time'], '' # for d in distances: # time =", "4.90007), '3': (52.375737, 4.896547), '2': (52.373634, 4.890289), '5': (52.376237, 4.90286), '4': (52.372995, 4.893096),", "point travelled print req_addresses, '----->', near_id, req_point, nearest, req_time, time if req_time >=", "addr_id time = float(nearest / TRANSPORTS[req_trans] * 60) req_time -= time req_point =", "time = d / TRANSPORTS[req['transport']] * 60 # minutes total_time -= time print", "# addrs with distances distances.append({'id': 0, 'location': req['location'], 'distance': 0.00}) distances.sort(key=lambda distance: (distance['distance'],", "'__main__': arg = sys.argv[-1] # get input file name addresses, reqs = [],", "# INT MAX near_id = '' for addr_id in req_addresses: addr_point = req_addresses[addr_id]", "point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1]) return", "if not req_addresses: break print res.strip() def nearby_attractions(addrs, reqs): for req in reqs:", "name addresses, reqs = [], [] with open(arg, 'r') as input_file: n =", "= d / TRANSPORTS[req['transport']] * 60 # minutes total_time -= time print distances[i],", "float(degree * 2 * PI / 360) if __name__ == '__main__': arg =", "- point1_long_in_radians)) * EARTH_RADIUS) def degree2radians(degree): return float(degree * 2 * PI /", "return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians - point1_long_in_radians)) *", "req_time = req['time'] req_addresses = {} for addr in addresses: req_addresses[addr['id']] = addr['location']", "# else: # break for i in xrange(len(distances) - 1): j = i", "req_addresses: addr_point = req_addresses[addr_id] distance = round(distance_between(req_point, addr_point), 2) # round to 0.2f", "time # if total_time >= 0: # res += str(d['id']) + ' '", "sort by id # print distances total_time, res = req['time'], '' # for", "# res += str(d['id']) + ' ' # else: # break for i", "res += str(distances[j]['id']) + ' ' else: break print res.strip() def distance_between(point1, point2):", "= 'tanchao' import sys import math PI = 3.14159265359 EARTH_RADIUS = 6371 #", "int(addr_id)): nearest = distance near_id = addr_id time = float(nearest / TRANSPORTS[req_trans] *", "{'15': (52.357895, 4.892835), '14': (52.368832, 4.892744), '35': (52.342497, 4.855094), '1': (52.378281, 4.90007), '3':", "{'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))} addresses.append(address) m = int(input_file.readline()) for i in xrange(m):", "int(near_id) > int(addr_id)): nearest = distance near_id = addr_id time = float(nearest /", "<filename>archive/hackerrank/nearby_attraction.py #!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'tanchao' import sys", "int(input_file.readline()) for i in xrange(m): line = input_file.readline() lines = line.split() req =", "print distances total_time, 
res = req['time'], '' # for d in distances: #", "req['time'], '' # for d in distances: # time = d['distance'] / TRANSPORTS[req['transport']]", "= degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians)", "int(lines[0]), 'location': (float(lines[1]), float(lines[2]))} addresses.append(address) m = int(input_file.readline()) for i in xrange(m): line", "(52.357895, 4.892835), '14': (52.368832, 4.892744), '35': (52.342497, 4.855094), '1': (52.378281, 4.90007), '3': (52.375737,", "4.893381)} ''' def nearby_attraction(addresses, reqs): for req in reqs: req_point = req['location'] req_trans", "* 60 # minutes total_time -= time print distances[i], distances[j], d, time, total_time", "__name__ == '__main__': arg = sys.argv[-1] # get input file name addresses, reqs", "distances = [] for addr in addrs: addr['distance'] = round(distance_between(req['location'], addr['location']), 2) #", "res = '' while True: nearest = 10000000 # INT MAX near_id =", "print res.strip() def nearby_attractions(addrs, reqs): for req in reqs: distances = [] for", "'metro': 20, 'bike': 15, 'foot': 5 } ''' {'15': (52.357895, 4.892835), '14': (52.368832,", "TRANSPORTS[req_trans] * 60) req_time -= time req_point = req_addresses.pop(near_id) # remove point travelled", "if total_time >= 0: # res += str(d['id']) + ' ' # else:", "in addrs: addr['distance'] = round(distance_between(req['location'], addr['location']), 2) # round to 0.2f distances.append(addr) #", "'6': (52.367066, 4.893381)} ''' def nearby_attraction(addresses, reqs): for req in reqs: req_point =", "res.strip() def nearby_attractions(addrs, reqs): for req in reqs: distances = [] for addr", "= '' while True: nearest = 10000000 # INT MAX near_id = ''", "> distance or (nearest == distance and int(near_id) > int(addr_id)): nearest = distance", "res = req['time'], '' # for d in distances: # time = d['distance']", "* 60) req_time -= time req_point = req_addresses.pop(near_id) # remove point travelled print", "distance['id'])) # sort by id # print distances total_time, res = req['time'], ''", "1 d = round(distance_between(distances[j]['location'], distances[i]['location']), 2) time = d / TRANSPORTS[req['transport']] * 60", "TRANSPORTS = { 'metro': 20, 'bike': 15, 'foot': 5 } ''' {'15': (52.357895,", "point2_long_in_radians = degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) + math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians", "'' # for d in distances: # time = d['distance'] / TRANSPORTS[req['transport']] *", "req['location'], 'distance': 0.00}) distances.sort(key=lambda distance: (distance['distance'], distance['id'])) # sort by id # print", "nearest > distance or (nearest == distance and int(near_id) > int(addr_id)): nearest =", "= req['time'], '' # for d in distances: # time = d['distance'] /", "input_file: n = int(input_file.readline()) for i in xrange(n): line = input_file.readline() lines =", "= float(nearest / TRANSPORTS[req_trans] * 60) req_time -= time req_point = req_addresses.pop(near_id) #", "60 # minutes total_time -= time print distances[i], distances[j], d, time, total_time if", "'tanchao' import sys import math PI = 3.14159265359 EARTH_RADIUS = 6371 # in", "'5': (52.376237, 4.90286), '4': (52.372995, 4.893096), '7': (52.366537, 4.911348), '6': (52.367066, 4.893381)} '''", 
"req_addresses[addr['id']] = addr['location'] res = '' while True: nearest = 10000000 # INT", "'bike': 15, 'foot': 5 } ''' {'15': (52.357895, 4.892835), '14': (52.368832, 4.892744), '35':", "req['location'] req_trans = req['transport'] req_time = req['time'] req_addresses = {} for addr in", "-*- coding: utf-8 -*- __author__ = 'tanchao' import sys import math PI =", "5 } ''' {'15': (52.357895, 4.892835), '14': (52.368832, 4.892744), '35': (52.342497, 4.855094), '1':", "(52.376237, 4.90286), '4': (52.372995, 4.893096), '7': (52.366537, 4.911348), '6': (52.367066, 4.893381)} ''' def", "4.90286), '4': (52.372995, 4.893096), '7': (52.366537, 4.911348), '6': (52.367066, 4.893381)} ''' def nearby_attraction(addresses,", "near_id = '' for addr_id in req_addresses: addr_point = req_addresses[addr_id] distance = round(distance_between(req_point,", "time = float(nearest / TRANSPORTS[req_trans] * 60) req_time -= time req_point = req_addresses.pop(near_id)", "for i in xrange(n): line = input_file.readline() lines = line.split() address = {'id':", "360) if __name__ == '__main__': arg = sys.argv[-1] # get input file name", "round to 0.2f if nearest > distance or (nearest == distance and int(near_id)", "nearest = 10000000 # INT MAX near_id = '' for addr_id in req_addresses:", "distances[i]['location']), 2) time = d / TRANSPORTS[req['transport']] * 60 # minutes total_time -=", "else: # break for i in xrange(len(distances) - 1): j = i +", "60 # minutes # total_time -= time # if total_time >= 0: #", "minutes total_time -= time print distances[i], distances[j], d, time, total_time if total_time >=", "= line.split() req = {'location': (float(lines[0]), float(lines[1])), 'transport': lines[2], 'time': int(lines[3])} reqs.append(req) nearby_attraction(addresses,", "' else: break print res.strip() def distance_between(point1, point2): point1_lat_in_radians = degree2radians(point1[0]) point1_long_in_radians =", "0.00}) distances.sort(key=lambda distance: (distance['distance'], distance['id'])) # sort by id # print distances total_time,", "degree2radians(point1[0]) point1_long_in_radians = degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) *", "(float(lines[1]), float(lines[2]))} addresses.append(address) m = int(input_file.readline()) for i in xrange(m): line = input_file.readline()", "in addresses: req_addresses[addr['id']] = addr['location'] res = '' while True: nearest = 10000000", "req_addresses.pop(near_id) # remove point travelled print req_addresses, '----->', near_id, req_point, nearest, req_time, time", "reqs: distances = [] for addr in addrs: addr['distance'] = round(distance_between(req['location'], addr['location']), 2)", "i in xrange(n): line = input_file.readline() lines = line.split() address = {'id': int(lines[0]),", "time if req_time >= 0: res += str(near_id) + ' ' else: break", "minutes # total_time -= time # if total_time >= 0: # res +=", "input_file.readline() lines = line.split() address = {'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))} addresses.append(address) m", "if nearest > distance or (nearest == distance and int(near_id) > int(addr_id)): nearest", "= line.split() address = {'id': int(lines[0]), 'location': (float(lines[1]), float(lines[2]))} addresses.append(address) m = int(input_file.readline())", "time = d['distance'] / TRANSPORTS[req['transport']] * 60 # minutes # total_time -= time", "addr_id in req_addresses: addr_point = 
req_addresses[addr_id] distance = round(distance_between(req_point, addr_point), 2) # round", "= degree2radians(point1[1]) point2_lat_in_radians = degree2radians(point2[0]) point2_long_in_radians = degree2radians(point2[1]) return float(math.acos(math.sin(point1_lat_in_radians) * math.sin(point2_lat_in_radians) +", "-= time # if total_time >= 0: # res += str(d['id']) + '", "10000000 # INT MAX near_id = '' for addr_id in req_addresses: addr_point =", "= int(input_file.readline()) for i in xrange(n): line = input_file.readline() lines = line.split() address", "d['distance'] / TRANSPORTS[req['transport']] * 60 # minutes # total_time -= time # if", "xrange(m): line = input_file.readline() lines = line.split() req = {'location': (float(lines[0]), float(lines[1])), 'transport':", "= req['transport'] req_time = req['time'] req_addresses = {} for addr in addresses: req_addresses[addr['id']]", "* EARTH_RADIUS) def degree2radians(degree): return float(degree * 2 * PI / 360) if", "'location': req['location'], 'distance': 0.00}) distances.sort(key=lambda distance: (distance['distance'], distance['id'])) # sort by id #", "lines = line.split() req = {'location': (float(lines[0]), float(lines[1])), 'transport': lines[2], 'time': int(lines[3])} reqs.append(req)", "'distance': 0.00}) distances.sort(key=lambda distance: (distance['distance'], distance['id'])) # sort by id # print distances", "print req_addresses, '----->', near_id, req_point, nearest, req_time, time if req_time >= 0: res", "distances.sort(key=lambda distance: (distance['distance'], distance['id'])) # sort by id # print distances total_time, res", "distances.append({'id': 0, 'location': req['location'], 'distance': 0.00}) distances.sort(key=lambda distance: (distance['distance'], distance['id'])) # sort by", "and int(near_id) > int(addr_id)): nearest = distance near_id = addr_id time = float(nearest", "* PI / 360) if __name__ == '__main__': arg = sys.argv[-1] # get", "+ ' ' else: break if not req_addresses: break print res.strip() def nearby_attractions(addrs,", "req_trans = req['transport'] req_time = req['time'] req_addresses = {} for addr in addresses:", "-= time print distances[i], distances[j], d, time, total_time if total_time >= 0: res", "line.split() req = {'location': (float(lines[0]), float(lines[1])), 'transport': lines[2], 'time': int(lines[3])} reqs.append(req) nearby_attraction(addresses, reqs)", "def nearby_attraction(addresses, reqs): for req in reqs: req_point = req['location'] req_trans = req['transport']", "addresses: req_addresses[addr['id']] = addr['location'] res = '' while True: nearest = 10000000 #", "distance: (distance['distance'], distance['id'])) # sort by id # print distances total_time, res =", "reqs = [], [] with open(arg, 'r') as input_file: n = int(input_file.readline()) for", "req_point = req['location'] req_trans = req['transport'] req_time = req['time'] req_addresses = {} for", "while True: nearest = 10000000 # INT MAX near_id = '' for addr_id", "float(lines[2]))} addresses.append(address) m = int(input_file.readline()) for i in xrange(m): line = input_file.readline() lines", "PI = 3.14159265359 EARTH_RADIUS = 6371 # in km TRANSPORTS = { 'metro':", "'' while True: nearest = 10000000 # INT MAX near_id = '' for", "not req_addresses: break print res.strip() def nearby_attractions(addrs, reqs): for req in reqs: distances", "addr['distance'] = round(distance_between(req['location'], addr['location']), 2) # round to 0.2f distances.append(addr) # addrs with", "addr['location']), 2) # round to 0.2f 
distances.append(addr) # addrs with distances distances.append({'id': 0,", "total_time if total_time >= 0: res += str(distances[j]['id']) + ' ' else: break", "total_time >= 0: # res += str(d['id']) + ' ' # else: #", "= round(distance_between(req_point, addr_point), 2) # round to 0.2f if nearest > distance or", "python # -*- coding: utf-8 -*- __author__ = 'tanchao' import sys import math", "req_addresses: break print res.strip() def nearby_attractions(addrs, reqs): for req in reqs: distances =", "math.cos(point1_lat_in_radians) * math.cos(point2_lat_in_radians) * math.cos(point2_long_in_radians - point1_long_in_radians)) * EARTH_RADIUS) def degree2radians(degree): return float(degree", "# remove point travelled print req_addresses, '----->', near_id, req_point, nearest, req_time, time if", "= addr_id time = float(nearest / TRANSPORTS[req_trans] * 60) req_time -= time req_point", "= {} for addr in addresses: req_addresses[addr['id']] = addr['location'] res = '' while", "0, 'location': req['location'], 'distance': 0.00}) distances.sort(key=lambda distance: (distance['distance'], distance['id'])) # sort by id", "{ 'metro': 20, 'bike': 15, 'foot': 5 } ''' {'15': (52.357895, 4.892835), '14':", "nearby_attractions(addrs, reqs): for req in reqs: distances = [] for addr in addrs:", "addr_point = req_addresses[addr_id] distance = round(distance_between(req_point, addr_point), 2) # round to 0.2f if", "'r') as input_file: n = int(input_file.readline()) for i in xrange(n): line = input_file.readline()", "(52.342497, 4.855094), '1': (52.378281, 4.90007), '3': (52.375737, 4.896547), '2': (52.373634, 4.890289), '5': (52.376237,", "xrange(n): line = input_file.readline() lines = line.split() address = {'id': int(lines[0]), 'location': (float(lines[1])," ]
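The row above is n-gram data drawn from a script that computes great-circle distances with the spherical law of cosines and then greedily visits the nearest remaining attraction within a time budget. As a reference for the formula the fragments encode, here is a minimal, self-contained sketch; the function name great_circle_km and the clamping guard are my additions, while EARTH_RADIUS_KM = 6371 comes from the fragments themselves:

import math

EARTH_RADIUS_KM = 6371  # mean Earth radius in km, as in the fragments above

def great_circle_km(p1, p2):
    # Spherical law of cosines:
    # d = R * acos(sin(lat1)sin(lat2) + cos(lat1)cos(lat2)cos(lon2 - lon1))
    lat1, lon1 = map(math.radians, p1)
    lat2, lon2 = map(math.radians, p2)
    cos_angle = (math.sin(lat1) * math.sin(lat2)
                 + math.cos(lat1) * math.cos(lat2) * math.cos(lon2 - lon1))
    # Clamp against floating-point drift just outside [-1, 1] before acos
    return math.acos(max(-1.0, min(1.0, cos_angle))) * EARTH_RADIUS_KM

if __name__ == '__main__':
    # Two of the Amsterdam coordinates visible in the fragments
    print(round(great_circle_km((52.357895, 4.892835), (52.368832, 4.892744)), 2))

Travel time in minutes then follows as distance / speed * 60, with speeds keyed by transport mode (metro 20, bike 15, foot 5 in the fragments).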
[ "'<KEY> # SECURITY WARNING: don't run with debug turned on in production! DEBUG", "'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' } CORS_ORIGIN_ALLOW_ALL = True MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, \"media\")", "'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS = True EMAIL_HOST_USER = config('EMAIL_HOST_USER') EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')", "= [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',", "'USER_CREATE_PASSWORD_RETYPE': True, 'ACTIVATION_URL': '#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL': False, 'SET_PASSWORD_RETYPE': True, 'PASSWORD_RESET_CONFIRM_RETYPE': True, 'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}', }", "'REFRESH_TOKEN_LIFETIME': timedelta(days=30), 'UPDATE_LAST_LOGIN': True, 'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' } CORS_ORIGIN_ALLOW_ALL = True MEDIA_URL = '/media/'", "'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS':", "= True USE_TZ = True STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static/') AUTHENTICATION_BACKENDS", "the secret key used in production secret! SECRET_KEY = '<KEY> # SECURITY WARNING:", "production! DEBUG = True ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = [", "'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS", "LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ", "}, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },", "'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC'", "os.path.join(BASE_DIR, 'static/') AUTHENTICATION_BACKENDS = [ 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] # Djoser Endpoints Config DJOSER =", "'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': {", "'users.User' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME':", "'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True", "[ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL", "'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120), 'REFRESH_TOKEN_LIFETIME': timedelta(days=30), 'UPDATE_LAST_LOGIN': True, 'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' } 
CORS_ORIGIN_ALLOW_ALL = True MEDIA_URL", "= 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS = True EMAIL_HOST_USER =", "# SECURITY WARNING: don't run with debug turned on in production! DEBUG =", "# https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N =", "'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders', 'djoser', 'v1.users', 'v1.shop', 'v1.item',", "False, 'SET_PASSWORD_RETYPE': True, 'PASSWORD_RESET_CONFIRM_RETYPE': True, 'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}', } SIMPLE_JWT = { 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120),", "= 1 REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], } MIDDLEWARE = [", "'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders', 'djoser', 'v1.users', 'v1.shop', 'v1.item', 'v1.category', 'v1.partner', 'v1.dashboard',", "[ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], } MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware',", "= { 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120), 'REFRESH_TOKEN_LIFETIME': timedelta(days=30), 'UPDATE_LAST_LOGIN': True, 'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' } CORS_ORIGIN_ALLOW_ALL =", "[ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders', 'djoser', 'v1.users', 'v1.shop',", "https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } }", "] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS':", "USE_TZ = True STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static/') AUTHENTICATION_BACKENDS = [", "'SEND_ACTIVATION_EMAIL': False, 'SET_PASSWORD_RETYPE': True, 'PASSWORD_RESET_CONFIRM_RETYPE': True, 'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}', } SIMPLE_JWT = { 'ACCESS_TOKEN_LIFETIME':", "turned on in production! 
DEBUG = True ALLOWED_HOSTS = ['*'] # Application definition", "'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls'", "], } MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware',", "Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N", "{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE", "] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N =", "Djoser Endpoints Config DJOSER = { 'USER_CREATE_PASSWORD_RETYPE': True, 'ACTIVATION_URL': '#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL': False, 'SET_PASSWORD_RETYPE':", "}, ] WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL = 'users.User' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES", "os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY", "secret key used in production secret! 
SECRET_KEY = '<KEY> # SECURITY WARNING: don't", "SECRET_KEY = '<KEY> # SECURITY WARNING: don't run with debug turned on in", "'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [", "[ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },", "'djoser', 'v1.users', 'v1.shop', 'v1.item', 'v1.category', 'v1.partner', 'v1.dashboard', ] SITE_ID = 1 REST_FRAMEWORK =", "'v1.dashboard', ] SITE_ID = 1 REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], }", "True ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes',", "ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True,", "'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL = 'users.User' # Database", "WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL = 'users.User' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = {", "}, }, ] WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL = 'users.User' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases", "TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True STATIC_URL", "[ 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] # Djoser Endpoints Config DJOSER = { 'USER_CREATE_PASSWORD_RETYPE': True, 'ACTIVATION_URL':", "] SITE_ID = 1 REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], } MIDDLEWARE", "= True ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth',", "'/media/' MEDIA_ROOT = os.path.join(BASE_DIR, \"media\") EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT =", "# https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), }", "True, 'ACTIVATION_URL': '#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL': False, 'SET_PASSWORD_RETYPE': True, 'PASSWORD_RESET_CONFIRM_RETYPE': True, 'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}', } SIMPLE_JWT", "True USE_TZ = True STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static/') AUTHENTICATION_BACKENDS =", "{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, {", "'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}', } SIMPLE_JWT = { 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120), 'REFRESH_TOKEN_LIFETIME': timedelta(days=30), 'UPDATE_LAST_LOGIN': True, 'USER_AUTHENTICATION_RULE':", "STATIC_URL = '/static/' STATIC_ROOT = 
os.path.join(BASE_DIR, 'static/') AUTHENTICATION_BACKENDS = [ 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] #", "'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES =", "'config.wsgi.application' AUTH_USER_MODEL = 'users.User' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': {", "'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders', 'djoser', 'v1.users', 'v1.shop', 'v1.item', 'v1.category',", "True, 'PASSWORD_RESET_CONFIRM_RETYPE': True, 'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}', } SIMPLE_JWT = { 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120), 'REFRESH_TOKEN_LIFETIME': timedelta(days=30),", "'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL =", "'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders', 'djoser', 'v1.users', 'v1.shop', 'v1.item', 'v1.category', 'v1.partner', 'v1.dashboard', ] SITE_ID", "\"media\") EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS = True", "validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',", "from decouple import config import os from datetime import timedelta BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))", "True STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static/') AUTHENTICATION_BACKENDS = [ 'django.contrib.auth.backends.AllowAllUsersModelBackend', ]", "used in production secret! SECRET_KEY = '<KEY> # SECURITY WARNING: don't run with", "'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] #", "= { 'USER_CREATE_PASSWORD_RETYPE': True, 'ACTIVATION_URL': '#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL': False, 'SET_PASSWORD_RETYPE': True, 'PASSWORD_RESET_CONFIRM_RETYPE': True, 'PASSWORD_RESET_CONFIRM_URL':", "True MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, \"media\") EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST =", "in production secret! 
SECRET_KEY = '<KEY> # SECURITY WARNING: don't run with debug", "'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates',", "REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], } MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware',", "'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE =", "'v1.item', 'v1.category', 'v1.partner', 'v1.dashboard', ] SITE_ID = 1 REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': [", "'v1.partner', 'v1.dashboard', ] SITE_ID = 1 REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ],", "} # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },", "= True USE_L10N = True USE_TZ = True STATIC_URL = '/static/' STATIC_ROOT =", "'static/') AUTHENTICATION_BACKENDS = [ 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] # Djoser Endpoints Config DJOSER = {", "'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND':", "EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS = True EMAIL_HOST_USER", "{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ]", "WARNING: keep the secret key used in production secret! 
SECRET_KEY = '<KEY> #", "[], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], },", "'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages',", "'corsheaders', 'djoser', 'v1.users', 'v1.shop', 'v1.item', 'v1.category', 'v1.partner', 'v1.dashboard', ] SITE_ID = 1 REST_FRAMEWORK", "True, 'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}', } SIMPLE_JWT = { 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120), 'REFRESH_TOKEN_LIFETIME': timedelta(days=30), 'UPDATE_LAST_LOGIN': True,", "'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [],", "'#/password/reset/confirm/{uid}/{token}', } SIMPLE_JWT = { 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120), 'REFRESH_TOKEN_LIFETIME': timedelta(days=30), 'UPDATE_LAST_LOGIN': True, 'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule'", "Config DJOSER = { 'USER_CREATE_PASSWORD_RETYPE': True, 'ACTIVATION_URL': '#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL': False, 'SET_PASSWORD_RETYPE': True, 'PASSWORD_RESET_CONFIRM_RETYPE':", "'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL = 'users.User' #", "}, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us'", "} CORS_ORIGIN_ALLOW_ALL = True MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, \"media\") EMAIL_BACKEND =", "import timedelta BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # SECURITY WARNING: keep the secret key used", "import os from datetime import timedelta BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # SECURITY WARNING: keep", "SITE_ID = 1 REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], } MIDDLEWARE =", "= { 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], } MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware',", "} MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware',", "# Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, {", 
"= [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [", "import config import os from datetime import timedelta BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # SECURITY", "os.path.join(BASE_DIR, \"media\") EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS =", "] WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL = 'users.User' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES =", "True, 'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' } CORS_ORIGIN_ALLOW_ALL = True MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR,", "'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [", "= [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders', 'djoser', 'v1.users',", "'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',", "= '<KEY> # SECURITY WARNING: don't run with debug turned on in production!", "'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ {", "# Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR,", "USE_I18N = True USE_L10N = True USE_TZ = True STATIC_URL = '/static/' STATIC_ROOT", "['*'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles',", "], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL = 'users.User' # Database #", "# Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True", "{ 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION =", "https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, {", "STATIC_ROOT = os.path.join(BASE_DIR, 'static/') AUTHENTICATION_BACKENDS = [ 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] # Djoser Endpoints Config", "'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 
'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'config.urls' TEMPLATES", "'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders', 'djoser', 'v1.users', 'v1.shop', 'v1.item', 'v1.category', 'v1.partner', 'v1.dashboard', ]", "= [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF", "'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders', 'djoser', 'v1.users', 'v1.shop', 'v1.item', 'v1.category', 'v1.partner',", "}, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization #", "'v1.category', 'v1.partner', 'v1.dashboard', ] SITE_ID = 1 REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication',", "'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE", "= os.path.join(BASE_DIR, \"media\") EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS", "{ 'USER_CREATE_PASSWORD_RETYPE': True, 'ACTIVATION_URL': '#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL': False, 'SET_PASSWORD_RETYPE': True, 'PASSWORD_RESET_CONFIRM_RETYPE': True, 'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}',", "'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS =", "'#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL': False, 'SET_PASSWORD_RETYPE': True, 'PASSWORD_RESET_CONFIRM_RETYPE': True, 'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}', } SIMPLE_JWT = {", "} SIMPLE_JWT = { 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120), 'REFRESH_TOKEN_LIFETIME': timedelta(days=30), 'UPDATE_LAST_LOGIN': True, 'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' }", "with debug turned on in production! 
DEBUG = True ALLOWED_HOSTS = ['*'] #", "[ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF =", "1 REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], } MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware',", "config import os from datetime import timedelta BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # SECURITY WARNING:", "Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),", "{ 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators", "= { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password", "{ 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation", "# Djoser Endpoints Config DJOSER = { 'USER_CREATE_PASSWORD_RETYPE': True, 'ACTIVATION_URL': '#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL': False,", "MEDIA_ROOT = os.path.join(BASE_DIR, \"media\") EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587", "'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' } CORS_ORIGIN_ALLOW_ALL = True MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, \"media\") EMAIL_BACKEND", "secret! SECRET_KEY = '<KEY> # SECURITY WARNING: don't run with debug turned on", "[ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug',", "AUTH_USER_MODEL = 'users.User' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE':", "{ 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120), 'REFRESH_TOKEN_LIFETIME': timedelta(days=30), 'UPDATE_LAST_LOGIN': True, 'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' } CORS_ORIGIN_ALLOW_ALL = True", "= 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True STATIC_URL =", "True USE_L10N = True USE_TZ = True STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR,", "DJOSER = { 'USER_CREATE_PASSWORD_RETYPE': True, 'ACTIVATION_URL': '#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL': False, 'SET_PASSWORD_RETYPE': True, 'PASSWORD_RESET_CONFIRM_RETYPE': True,", "debug turned on in production! 
DEBUG = True ALLOWED_HOSTS = ['*'] # Application", "'rest_framework', 'corsheaders', 'djoser', 'v1.users', 'v1.shop', 'v1.item', 'v1.category', 'v1.partner', 'v1.dashboard', ] SITE_ID = 1", "MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ]", "timedelta(days=30), 'UPDATE_LAST_LOGIN': True, 'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' } CORS_ORIGIN_ALLOW_ALL = True MEDIA_URL = '/media/' MEDIA_ROOT", "# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },", "= 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS':", "{ 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], } MIDDLEWARE = [ 'corsheaders.middleware.CorsMiddleware', 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware',", "'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ],", "} } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',", "True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ]", "USE_L10N = True USE_TZ = True STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static/')", "'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' AUTH_USER_MODEL = 'users.User'", "] # Djoser Endpoints Config DJOSER = { 'USER_CREATE_PASSWORD_RETYPE': True, 'ACTIVATION_URL': '#/activate/{uid}/{token}', 'SEND_ACTIVATION_EMAIL':", "definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders',", "'/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static/') AUTHENTICATION_BACKENDS = [ 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] # Djoser Endpoints", "Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME':", "run with debug turned on in production! 
DEBUG = True ALLOWED_HOSTS = ['*']", "'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME':", "INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework', 'corsheaders', 'djoser',", "BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # SECURITY WARNING: keep the secret key used in production", "MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, \"media\") EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com'", "decouple import config import os from datetime import timedelta BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) #", "datetime import timedelta BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # SECURITY WARNING: keep the secret key", "EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS = True EMAIL_HOST_USER = config('EMAIL_HOST_USER') EMAIL_HOST_PASSWORD", "}, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N", "= os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # SECURITY WARNING: keep the secret key used in production secret!", "'UPDATE_LAST_LOGIN': True, 'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule' } CORS_ORIGIN_ALLOW_ALL = True MEDIA_URL = '/media/' MEDIA_ROOT =", "'v1.users', 'v1.shop', 'v1.item', 'v1.category', 'v1.partner', 'v1.dashboard', ] SITE_ID = 1 REST_FRAMEWORK = {", "'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization", "WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS", "AUTHENTICATION_BACKENDS = [ 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] # Djoser Endpoints Config DJOSER = { 'USER_CREATE_PASSWORD_RETYPE':", "Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'rest_framework',", "os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ {", "= True MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, \"media\") EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST", "TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors':", "ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions',", "= ['*'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages',", "production secret! 
import os
from datetime import timedelta
from decouple import config  # assumed import: `config(...)` is used for the e-mail credentials below

BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'rest_framework',
    'corsheaders',
    'djoser',
    'v1.users',
    'v1.shop',
    'v1.item',
    'v1.category',
    'v1.partner',
    'v1.dashboard',
]

SITE_ID = 1

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ],
}

MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # (remaining middleware entries lost in the damaged source)
]

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'config.wsgi.application'

AUTH_USER_MODEL = 'users.User'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
    {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
    {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')

AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.AllowAllUsersModelBackend',
]

# Djoser Endpoints Config
DJOSER = {
    'USER_CREATE_PASSWORD_RETYPE': True,
    'ACTIVATION_URL': '#/activate/{uid}/{token}',
    'SEND_ACTIVATION_EMAIL': False,
    'SET_PASSWORD_RETYPE': True,
    'PASSWORD_RESET_CONFIRM_RETYPE': True,
    'PASSWORD_RESET_CONFIRM_URL': '#/password/reset/confirm/{uid}/{token}',
}

SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=120),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=30),
    'UPDATE_LAST_LOGIN': True,
    'USER_AUTHENTICATION_RULE': 'v1.third_party.rest_framework_simplejwt.authentication.custom_user_authentication_rule',
}

CORS_ORIGIN_ALLOW_ALL = True

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")

EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')  # assumed: the original value was truncated in the source
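# --- Added note (not part of the original repository) ---
# The settings above ship with a redacted SECRET_KEY ('<KEY>') and DEBUG=True.
# For a real deployment you would generate a fresh key with Django's own
# helper and feed it in through the environment instead of the file:
#
#     from django.core.management.utils import get_random_secret_key
#     print(get_random_secret_key())  # paste the output into an env var, e.g. DJANGO_SECRET_KEY
#
# and then read it back with the same python-decouple `config()` call already
# used for the e-mail credentials: SECRET_KEY = config('DJANGO_SECRET_KEY').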
[ "from my_mod import enlarge, decimate class TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10), 1000) def test_decimate(self):", "decimate class TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10), 1000) def test_decimate(self): self.assertEqual(decimate(100), 90) if __name__", "def test_enlarge(self): self.assertEqual(enlarge(10), 1000) def test_decimate(self): self.assertEqual(decimate(100), 90) if __name__ == '__main__': unittest.main()", "unittest from my_mod import enlarge, decimate class TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10), 1000) def", "TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10), 1000) def test_decimate(self): self.assertEqual(decimate(100), 90) if __name__ == '__main__':", "import enlarge, decimate class TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10), 1000) def test_decimate(self): self.assertEqual(decimate(100), 90)", "enlarge, decimate class TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10), 1000) def test_decimate(self): self.assertEqual(decimate(100), 90) if", "import unittest from my_mod import enlarge, decimate class TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10), 1000)", "<filename>my_lambdata/my_test.py import unittest from my_mod import enlarge, decimate class TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10),", "my_mod import enlarge, decimate class TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10), 1000) def test_decimate(self): self.assertEqual(decimate(100),", "class TestMathFunctions(unittest.TestCase): def test_enlarge(self): self.assertEqual(enlarge(10), 1000) def test_decimate(self): self.assertEqual(decimate(100), 90) if __name__ ==" ]
[ "karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] #", "of the first computer?\",\"8.When was the search for a modern computer first?\",\"9.when did", "lifeline = 0 # for i in range(len(question_list)): # print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) #", "kr chuke hai\") # print() # else: # print(\"sadly! Aapka jawab galat hai\")", "above\",\"4.Wolfrom Alpha\",\"4.non of the above\",\"4.non of the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] #", "sahi hai\") # print() # else: # print(\"Aap lifelife use kr chuke hai\")", "# print \"App jeet gaaye,\" # print \"win_Rs\",win_Rs[i+1] # else: # print \"App", "solution_list = [3, 4, 1] # lifeline = 0 # for i in", "full name of CPU\",\"13.which of these is the search engine?\",\"14.which of the input", "byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] # fourth_options=[\"4.non of", "gaya hai\" # print \" \" # print \" \" # print \"Congratulation", "the great revalution in the field of computer?\",\"10.what is hindi name of computer?\",\"11.computer", "\"Agriculture\"]] # solution_list = [3, 4, 1] # lifeline = 0 # for", "# elif user == 5050: # if lifeline == 0: # lifeline+=1 #", "print \"App haar gaaye \" # print \"total_Rs\",win_Rs[i] # break # if i==4:", "# print \" \" # print \"Congratulation Aap\",win_Rs[i],\"Aap etane rupees jeet chuke hai\"", "option \")) # if user == ans_key[i]: # print \"App jeet gaaye,\" #", "Aap\",win_Rs[i],\"Aap etane rupees jeet chuke hai\" ####kbc2### # question_list = [\"1.How many continents", "print(\"congrats! Aapka answer sahi hai\") # print() # elif user == 5050: #", "print \"App jeet gaaye,\" # print \"win_Rs\",win_Rs[i+1] # else: # print \"App haar", "the correct option \")) # if user == ans_key[i]: # print \"App jeet", "\"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]] # solution_list =", "kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega", "gaaye \" # print \"total_Rs\",win_Rs[i] # break # if i==4: # print \"congrats!", "# print(\"congrats! Aapka answer sahi hai\") # print() # elif user == 5050:", "gaya hai\" # print \" \" # elif i==9: # print \"congrats! Aapka", "\" \" # elif i==9: # print \"congrats! 
Aapka padaav pura ho gaya", "= int(input(\"Enter the correct option \")) # if user == solution_list[i]: # print(\"congrats!", "fourth_options[i] # user = int(raw_input(\"Enter the correct option \")) # if user ==", "of these is the search engine?\",\"14.which of the input units is?\",\"15.how many bytes", "# if lifeline == 0: # lifeline+=1 # a = solution_list[i]-1 # print(question_list[i])", "# print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) # print(2,options_list[i][1]) # print(3,options_list[i][2]) # print(4,options_list[i][3]) # user =", "search engine?\",\"14.which of the input units is?\",\"15.how many bytes of 1 KB are", "day is celebrated?\",\"12.what is the full name of CPU\",\"13.which of these is the", "input units is?\",\"15.how many bytes of 1 KB are equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1.", "\"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]] # solution_list = [3, 4, 1] # lifeline", "colour?\",\"4.what is tree colour?\",\"5.how many months there are in a year?\",\"6.who is the", "win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i in range(len(question_list)): # print question_list[i], len(question_list[i]) #", "answer sahi hai\") # print() # elif user == 5050: # if lifeline", "user = int(input(\"Enter the correct option \")) # if user == solution_list[i]: #", "Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] # fourth_options=[\"4.non", "# else: # print \"App haar gaaye \" # print \"total_Rs\",win_Rs[i] # break", "first_options[i] # print second_options[i] # print third_options[i] # print fourth_options[i] # user =", "hai\" ####kbc2### # question_list = [\"1.How many continents are there?\", \"2.What is the", "== 5050: # if lifeline == 0: # lifeline+=1 # a = solution_list[i]-1", "i in range(len(question_list)): # print question_list[i], len(question_list[i]) # print first_options[i] # print second_options[i]", "the mahabharat?\",\"2.what is the capital of India?\",\"3.what is apples colour?\",\"4.what is tree colour?\",\"5.how", "print \"Congratulation Aap\",win_Rs[i],\"Aap etane rupees jeet chuke hai\" ####kbc2### # question_list = [\"1.How", "ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i in range(len(question_list)): # print question_list[i], len(question_list[i]) # print first_options[i]", "Aapka padaav pura ho gaya hai\" # print \" \" # elif i==9:", "many continents are there?\", \"2.What is the capital of India?\", \"3.NG mei kaun", "\"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]] # solution_list = [3, 4, 1]", "# user = int(raw_input(\"Enter the correct option \")) # if user == ans_key[i]:", "the capital of India?\",\"3.what is apples colour?\",\"4.what is tree colour?\",\"5.how many months there", "# lifeline = 0 # for i in range(len(question_list)): # print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0])", "many bytes of 1 KB are equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. 
wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central", "jaata hai?\"] # options_list = [[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\",", "elif user == 5050: # if lifeline == 0: # lifeline+=1 # a", "# if user_input == 1: # print(\"Congratulation Aapka answer sahi hai\") # print()", "mei kaun se course padhaya jaata hai?\"] # options_list = [[\"Four\", \"Nine\", \"Seven\",", "ans_key[i]: # print \"App jeet gaaye,\" # print \"win_Rs\",win_Rs[i+1] # else: # print", "are there?\", \"2.What is the capital of India?\", \"3.NG mei kaun se course", "# break # if i==4: # print \"congrats! Aapka padaav pura ho gaya", "user == solution_list[i]: # print(\"congrats! Aapka answer sahi hai\") # print() # elif", "print(2,options_list[i][i]) # user_input = int(input(\"Enter the correct option \")) # if user_input ==", "a year?\",\"6.who is the computer invetor?\",\"7.What was the of the first computer?\",\"8.When was", "byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central", "print(\"Aap lifelife use kr chuke hai\") # print() # else: # print(\"sadly! Aapka", "ho gaya hai\" # print \" \" # print \" \" # print", "<gh_stars>1-10 # question_list=[\"1.who wrote the mahabharat?\",\"2.what is the capital of India?\",\"3.what is apples", "Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] # fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2", "sahi hai\") # print() # elif user == 5050: # if lifeline ==", "option \")) # if user == solution_list[i]: # print(\"congrats! Aapka answer sahi hai\")", "# lifeline+=1 # a = solution_list[i]-1 # print(question_list[i]) # print(1,options_list[i][a]) # print(2,options_list[i][i]) #", "# print \"win_Rs\",win_Rs[i+1] # else: # print \"App haar gaaye \" # print", "question_list[i], len(question_list[i]) # print first_options[i] # print second_options[i] # print third_options[i] # print", "\"Congratulation Aap\",win_Rs[i],\"Aap etane rupees jeet chuke hai\" ####kbc2### # question_list = [\"1.How many", "# print(\"Aap lifelife use kr chuke hai\") # print() # else: # print(\"sadly!", "= [3, 4, 1] # lifeline = 0 # for i in range(len(question_list)):", "# print(1,options_list[i][a]) # print(2,options_list[i][i]) # user_input = int(input(\"Enter the correct option \")) #", "of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the above\",\"4.Wolfrom Alpha\",\"4.non of the", "options_list = [[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]]", "correct option \")) # if user == ans_key[i]: # print \"App jeet gaaye,\"", "modern computer first?\",\"9.when did the great revalution in the field of computer?\",\"10.what is", "== solution_list[i]: # print(\"congrats! 
Aapka answer sahi hai\") # print() # elif user", "capital of India?\",\"3.what is apples colour?\",\"4.what is tree colour?\",\"5.how many months there are", "these is the search engine?\",\"14.which of the input units is?\",\"15.how many bytes of", "karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] # fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the", "jeet chuke hai\" ####kbc2### # question_list = [\"1.How many continents are there?\", \"2.What", "# ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i in range(len(question_list)): # print question_list[i], len(question_list[i]) # print", "= solution_list[i]-1 # print(question_list[i]) # print(1,options_list[i][a]) # print(2,options_list[i][i]) # user_input = int(input(\"Enter the", "Aapka answer sahi hai\") # print() # else: # print(\"Aap lifelife use kr", "name of CPU\",\"13.which of these is the search engine?\",\"14.which of the input units", "the search for a modern computer first?\",\"9.when did the great revalution in the", "1] # lifeline = 0 # for i in range(len(question_list)): # print(question_list[i]),len(question_list[i]) #", "mahabharat?\",\"2.what is the capital of India?\",\"3.what is apples colour?\",\"4.what is tree colour?\",\"5.how many", "= int(raw_input(\"Enter the correct option \")) # if user == ans_key[i]: # print", "is the capital of India?\",\"3.what is apples colour?\",\"4.what is tree colour?\",\"5.how many months", "# elif i==9: # print \"congrats! Aapka padaav pura ho gaya hai\" #", "pura ho gaya hai\" # print \" \" # elif i==9: # print", "# if i==4: # print \"congrats! Aapka padaav pura ho gaya hai\" #", "\"Tourism\", \"Agriculture\"]] # solution_list = [3, 4, 1] # lifeline = 0 #", "\"Counseling\", \"Tourism\", \"Agriculture\"]] # solution_list = [3, 4, 1] # lifeline = 0", "# second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing", "padaav pura ho gaya hai\" # print \" \" # print \" \"", "# print \"congrats! Aapka padaav pura ho gaya hai\" # print \" \"", "lifeline+=1 # a = solution_list[i]-1 # print(question_list[i]) # print(1,options_list[i][a]) # print(2,options_list[i][i]) # user_input", "answer sahi hai\") # print() # else: # print(\"Aap lifelife use kr chuke", "# first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024", "if user_input == 1: # print(\"Congratulation Aapka answer sahi hai\") # print() #", "\")) # if user_input == 1: # print(\"Congratulation Aapka answer sahi hai\") #", "int(input(\"Enter the correct option \")) # if user == solution_list[i]: # print(\"congrats! 
Aapka", "is apples colour?\",\"4.what is tree colour?\",\"5.how many months there are in a year?\",\"6.who", "in range(len(question_list)): # print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) # print(2,options_list[i][1]) # print(3,options_list[i][2]) # print(4,options_list[i][3]) #", "5050: # if lifeline == 0: # lifeline+=1 # a = solution_list[i]-1 #", "of computer?\",\"11.computer literacy day is celebrated?\",\"12.what is the full name of CPU\",\"13.which of", "literacy day is celebrated?\",\"12.what is the full name of CPU\",\"13.which of these is", "print(1,options_list[i][a]) # print(2,options_list[i][i]) # user_input = int(input(\"Enter the correct option \")) # if", "India?\", \"3.NG mei kaun se course padhaya jaata hai?\"] # options_list = [[\"Four\",", "continents are there?\", \"2.What is the capital of India?\", \"3.NG mei kaun se", "chuke hai\" ####kbc2### # question_list = [\"1.How many continents are there?\", \"2.What is", "question_list=[\"1.who wrote the mahabharat?\",\"2.what is the capital of India?\",\"3.what is apples colour?\",\"4.what is", "Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]] # solution_list = [3, 4, 1] # lifeline =", "else: # print(\"Aap lifelife use kr chuke hai\") # print() # else: #", "\" # print \"Congratulation Aap\",win_Rs[i],\"Aap etane rupees jeet chuke hai\" ####kbc2### # question_list", "the capital of India?\", \"3.NG mei kaun se course padhaya jaata hai?\"] #", "of the above\",\"4.non of the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i", "solution_list[i]-1 # print(question_list[i]) # print(1,options_list[i][a]) # print(2,options_list[i][i]) # user_input = int(input(\"Enter the correct", "the above\",\"4.non of the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i in", "CPU\",\"13.which of these is the search engine?\",\"14.which of the input units is?\",\"15.how many", "# solution_list = [3, 4, 1] # lifeline = 0 # for i", "\"total_Rs\",win_Rs[i] # break # if i==4: # print \"congrats! Aapka padaav pura ho", "# print(2,options_list[i][i]) # user_input = int(input(\"Enter the correct option \")) # if user_input", "the first computer?\",\"8.When was the search for a modern computer first?\",\"9.when did the", "of the above\",\"4.Wolfrom Alpha\",\"4.non of the above\",\"4.non of the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] #", "\"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]] # solution_list = [3, 4,", "print second_options[i] # print third_options[i] # print fourth_options[i] # user = int(raw_input(\"Enter the", "\" # print \"total_Rs\",win_Rs[i] # break # if i==4: # print \"congrats! Aapka", "correct option \")) # if user == solution_list[i]: # print(\"congrats! Aapka answer sahi", "print \" \" # elif i==9: # print \"congrats! 
Aapka padaav pura ho", "field of computer?\",\"10.what is hindi name of computer?\",\"11.computer literacy day is celebrated?\",\"12.what is", "mega byte\"] # fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the", "tree colour?\",\"5.how many months there are in a year?\",\"6.who is the computer invetor?\",\"7.What", "\" \" # print \" \" # print \"Congratulation Aap\",win_Rs[i],\"Aap etane rupees jeet", "in range(len(question_list)): # print question_list[i], len(question_list[i]) # print first_options[i] # print second_options[i] #", "Disember\",\"4.non of the above\",\"4.Wolfrom Alpha\",\"4.non of the above\",\"4.non of the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000]", "print(question_list[i]) # print(1,options_list[i][a]) # print(2,options_list[i][i]) # user_input = int(input(\"Enter the correct option \"))", "is the capital of India?\", \"3.NG mei kaun se course padhaya jaata hai?\"]", "for i in range(len(question_list)): # print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) # print(2,options_list[i][1]) # print(3,options_list[i][2]) #", "\"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]] # solution_list = [3, 4, 1] #", "processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] # fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non", "above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the above\",\"4.Wolfrom Alpha\",\"4.non of the above\",\"4.non of the above\"] #", "of India?\",\"3.what is apples colour?\",\"4.what is tree colour?\",\"5.how many months there are in", "computer first?\",\"9.when did the great revalution in the field of computer?\",\"10.what is hindi", "# print(question_list[i]) # print(1,options_list[i][a]) # print(2,options_list[i][i]) # user_input = int(input(\"Enter the correct option", "wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"]", "\")) # if user == ans_key[i]: # print \"App jeet gaaye,\" # print", "# print first_options[i] # print second_options[i] # print third_options[i] # print fourth_options[i] #", "third_options[i] # print fourth_options[i] # user = int(raw_input(\"Enter the correct option \")) #", "user_input = int(input(\"Enter the correct option \")) # if user_input == 1: #", "pura ho gaya hai\" # print \" \" # print \" \" #", "search for a modern computer first?\",\"9.when did the great revalution in the field", "the full name of CPU\",\"13.which of these is the search engine?\",\"14.which of the", "\" \" # print \"Congratulation Aap\",win_Rs[i],\"Aap etane rupees jeet chuke hai\" ####kbc2### #", "the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i in range(len(question_list)): # print", "computer?\",\"11.computer literacy day 
is celebrated?\",\"12.what is the full name of CPU\",\"13.which of these", "processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab", "jeet gaaye,\" # print \"win_Rs\",win_Rs[i+1] # else: # print \"App haar gaaye \"", "# print \"Congratulation Aap\",win_Rs[i],\"Aap etane rupees jeet chuke hai\" ####kbc2### # question_list =", "if user == ans_key[i]: # print \"App jeet gaaye,\" # print \"win_Rs\",win_Rs[i+1] #", "above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i in range(len(question_list)): # print question_list[i],", "of 1 KB are equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024", "# print question_list[i], len(question_list[i]) # print first_options[i] # print second_options[i] # print third_options[i]", "hai\") # print() # else: # print(\"Aap lifelife use kr chuke hai\") #", "\")) # if user == solution_list[i]: # print(\"congrats! Aapka answer sahi hai\") #", "there?\", \"2.What is the capital of India?\", \"3.NG mei kaun se course padhaya", "print \"congrats! Aapka padaav pura ho gaya hai\" # print \" \" #", "is the search engine?\",\"14.which of the input units is?\",\"15.how many bytes of 1", "Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] #", "Aapka padaav pura ho gaya hai\" # print \" \" # print \"", "engine?\",\"14.which of the input units is?\",\"15.how many bytes of 1 KB are equal", "of the input units is?\",\"15.how many bytes of 1 KB are equal to?\"]", "byte\"] # fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the above\",\"4.Wolfrom", "# print \"App haar gaaye \" # print \"total_Rs\",win_Rs[i] # break # if", "is hindi name of computer?\",\"11.computer literacy day is celebrated?\",\"12.what is the full name", "to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. 
wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem", "of India?\", \"3.NG mei kaun se course padhaya jaata hai?\"] # options_list =", "\"3.NG mei kaun se course padhaya jaata hai?\"] # options_list = [[\"Four\", \"Nine\",", "revalution in the field of computer?\",\"10.what is hindi name of computer?\",\"11.computer literacy day", "# print fourth_options[i] # user = int(raw_input(\"Enter the correct option \")) # if", "are in a year?\",\"6.who is the computer invetor?\",\"7.What was the of the first", "print() # elif user == 5050: # if lifeline == 0: # lifeline+=1", "\"App jeet gaaye,\" # print \"win_Rs\",win_Rs[i+1] # else: # print \"App haar gaaye", "[\"1.How many continents are there?\", \"2.What is the capital of India?\", \"3.NG mei", "# for i in range(len(question_list)): # print question_list[i], len(question_list[i]) # print first_options[i] #", "0 # for i in range(len(question_list)): # print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) # print(2,options_list[i][1]) #", "of the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i in range(len(question_list)): #", "# for i in range(len(question_list)): # print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) # print(2,options_list[i][1]) # print(3,options_list[i][2])", "units is?\",\"15.how many bytes of 1 KB are equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna", "# print() # elif user == 5050: # if lifeline == 0: #", "\"win_Rs\",win_Rs[i+1] # else: # print \"App haar gaaye \" # print \"total_Rs\",win_Rs[i] #", "the field of computer?\",\"10.what is hindi name of computer?\",\"11.computer literacy day is celebrated?\",\"12.what", "Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] # fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of", "i==4: # print \"congrats! Aapka padaav pura ho gaya hai\" # print \"", "print(4,options_list[i][3]) # user = int(input(\"Enter the correct option \")) # if user ==", "baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] # fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of", "equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central", "\"congrats! 
Aapka padaav pura ho gaya hai\" # print \" \" # elif", "correct option \")) # if user_input == 1: # print(\"Congratulation Aapka answer sahi", "= int(input(\"Enter the correct option \")) # if user_input == 1: # print(\"Congratulation", "in the field of computer?\",\"10.what is hindi name of computer?\",\"11.computer literacy day is", "# print(1,options_list[i][0]) # print(2,options_list[i][1]) # print(3,options_list[i][2]) # print(4,options_list[i][3]) # user = int(input(\"Enter the", "lifelife use kr chuke hai\") # print() # else: # print(\"sadly! Aapka jawab", "# print(3,options_list[i][2]) # print(4,options_list[i][3]) # user = int(input(\"Enter the correct option \")) #", "== 0: # lifeline+=1 # a = solution_list[i]-1 # print(question_list[i]) # print(1,options_list[i][a]) #", "1: # print(\"Congratulation Aapka answer sahi hai\") # print() # else: # print(\"Aap", "first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega", "# question_list = [\"1.How many continents are there?\", \"2.What is the capital of", "# if user == solution_list[i]: # print(\"congrats! Aapka answer sahi hai\") # print()", "user == ans_key[i]: # print \"App jeet gaaye,\" # print \"win_Rs\",win_Rs[i+1] # else:", "if user == solution_list[i]: # print(\"congrats! Aapka answer sahi hai\") # print() #", "colour?\",\"5.how many months there are in a year?\",\"6.who is the computer invetor?\",\"7.What was", "# user_input = int(input(\"Enter the correct option \")) # if user_input == 1:", "name of computer?\",\"11.computer literacy day is celebrated?\",\"12.what is the full name of CPU\",\"13.which", "range(len(question_list)): # print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) # print(2,options_list[i][1]) # print(3,options_list[i][2]) # print(4,options_list[i][3]) # user", "# fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the above\",\"4.Wolfrom Alpha\",\"4.non", "the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the above\",\"4.Wolfrom Alpha\",\"4.non of the above\",\"4.non of the above\"]", "is celebrated?\",\"12.what is the full name of CPU\",\"13.which of these is the search", "# user = int(input(\"Enter the correct option \")) # if user == solution_list[i]:", "was the search for a modern computer first?\",\"9.when did the great revalution in", "padhaya jaata hai?\"] # options_list = [[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software", "# print(\"Congratulation Aapka answer sahi hai\") # print() # else: # print(\"Aap lifelife", "i==9: # print \"congrats! Aapka padaav pura ho gaya hai\" # print \"", "hai\" # print \" \" # elif i==9: # print \"congrats! Aapka padaav", "the input units is?\",\"15.how many bytes of 1 KB are equal to?\"] #", "are equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. 
wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS", "problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"]", "invetor?\",\"7.What was the of the first computer?\",\"8.When was the search for a modern", "\" # elif i==9: # print \"congrats! Aapka padaav pura ho gaya hai\"", "# third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] # fourth_options=[\"4.non of the", "print(3,options_list[i][2]) # print(4,options_list[i][3]) # user = int(input(\"Enter the correct option \")) # if", "== 1: # print(\"Congratulation Aapka answer sahi hai\") # print() # else: #", "for i in range(len(question_list)): # print question_list[i], len(question_list[i]) # print first_options[i] # print", "Aapka answer sahi hai\") # print() # elif user == 5050: # if", "elif i==9: # print \"congrats! Aapka padaav pura ho gaya hai\" # print", "was the of the first computer?\",\"8.When was the search for a modern computer", "padaav pura ho gaya hai\" # print \" \" # elif i==9: #", "\" # print \" \" # print \"Congratulation Aap\",win_Rs[i],\"Aap etane rupees jeet chuke", "is tree colour?\",\"5.how many months there are in a year?\",\"6.who is the computer", "[[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]] # solution_list", "# print(4,options_list[i][3]) # user = int(input(\"Enter the correct option \")) # if user", "computer?\",\"10.what is hindi name of computer?\",\"11.computer literacy day is celebrated?\",\"12.what is the full", "# print second_options[i] # print third_options[i] # print fourth_options[i] # user = int(raw_input(\"Enter", "the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the above\",\"4.Wolfrom Alpha\",\"4.non of the above\",\"4.non", "0: # lifeline+=1 # a = solution_list[i]-1 # print(question_list[i]) # print(1,options_list[i][a]) # print(2,options_list[i][i])", "= [[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]] #", "print \"total_Rs\",win_Rs[i] # break # if i==4: # print \"congrats! 
Aapka padaav pura", "= 0 # for i in range(len(question_list)): # print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) # print(2,options_list[i][1])", "user = int(raw_input(\"Enter the correct option \")) # if user == ans_key[i]: #", "apples colour?\",\"4.what is tree colour?\",\"5.how many months there are in a year?\",\"6.who is", "else: # print \"App haar gaaye \" # print \"total_Rs\",win_Rs[i] # break #", "hindi name of computer?\",\"11.computer literacy day is celebrated?\",\"12.what is the full name of", "above\",\"4.non of the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i in range(len(question_list)):", "# print \" \" # print \" \" # print \"Congratulation Aap\",win_Rs[i],\"Aap etane", "solution_list[i]: # print(\"congrats! Aapka answer sahi hai\") # print() # elif user ==", "print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) # print(2,options_list[i][1]) # print(3,options_list[i][2]) # print(4,options_list[i][3]) # user = int(input(\"Enter", "# win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for i in range(len(question_list)): # print question_list[i], len(question_list[i])", "fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the above\",\"4.Wolfrom Alpha\",\"4.non of", "print(\"Congratulation Aapka answer sahi hai\") # print() # else: # print(\"Aap lifelife use", "# question_list=[\"1.who wrote the mahabharat?\",\"2.what is the capital of India?\",\"3.what is apples colour?\",\"4.what", "# print third_options[i] # print fourth_options[i] # user = int(raw_input(\"Enter the correct option", "there are in a year?\",\"6.who is the computer invetor?\",\"7.What was the of the", "# a = solution_list[i]-1 # print(question_list[i]) # print(1,options_list[i][a]) # print(2,options_list[i][i]) # user_input =", "first computer?\",\"8.When was the search for a modern computer first?\",\"9.when did the great", "# else: # print(\"Aap lifelife use kr chuke hai\") # print() # else:", "celebrated?\",\"12.what is the full name of CPU\",\"13.which of these is the search engine?\",\"14.which", "print(2,options_list[i][1]) # print(3,options_list[i][2]) # print(4,options_list[i][3]) # user = int(input(\"Enter the correct option \"))", "range(len(question_list)): # print question_list[i], len(question_list[i]) # print first_options[i] # print second_options[i] # print", "# print \"total_Rs\",win_Rs[i] # break # if i==4: # print \"congrats! Aapka padaav", "user == 5050: # if lifeline == 0: # lifeline+=1 # a =", "is the computer invetor?\",\"7.What was the of the first computer?\",\"8.When was the search", "is the full name of CPU\",\"13.which of these is the search engine?\",\"14.which of", "bytes of 1 KB are equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. 
wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing", "print third_options[i] # print fourth_options[i] # user = int(raw_input(\"Enter the correct option \"))", "# if user == ans_key[i]: # print \"App jeet gaaye,\" # print \"win_Rs\",win_Rs[i+1]", "print question_list[i], len(question_list[i]) # print first_options[i] # print second_options[i] # print third_options[i] #", "print(1,options_list[i][0]) # print(2,options_list[i][1]) # print(3,options_list[i][2]) # print(4,options_list[i][3]) # user = int(input(\"Enter the correct", "second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024", "ho gaya hai\" # print \" \" # elif i==9: # print \"congrats!", "etane rupees jeet chuke hai\" ####kbc2### # question_list = [\"1.How many continents are", "== ans_key[i]: # print \"App jeet gaaye,\" # print \"win_Rs\",win_Rs[i+1] # else: #", "of computer?\",\"10.what is hindi name of computer?\",\"11.computer literacy day is celebrated?\",\"12.what is the", "gaaye,\" # print \"win_Rs\",win_Rs[i+1] # else: # print \"App haar gaaye \" #", "the correct option \")) # if user == solution_list[i]: # print(\"congrats! Aapka answer", "above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the above\",\"4.Wolfrom Alpha\",\"4.non of the above\",\"4.non of", "of CPU\",\"13.which of these is the search engine?\",\"14.which of the input units is?\",\"15.how", "hai\") # print() # elif user == 5050: # if lifeline == 0:", "many months there are in a year?\",\"6.who is the computer invetor?\",\"7.What was the", "1 KB are equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"]", "KB are equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5 Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] #", "len(question_list[i]) # print first_options[i] # print second_options[i] # print third_options[i] # print fourth_options[i]", "# print \" \" # elif i==9: # print \"congrats! Aapka padaav pura", "kaun se course padhaya jaata hai?\"] # options_list = [[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\",", "break # if i==4: # print \"congrats! Aapka padaav pura ho gaya hai\"", "# print() # else: # print(\"Aap lifelife use kr chuke hai\") # print()", "computer invetor?\",\"7.What was the of the first computer?\",\"8.When was the search for a", "[3, 4, 1] # lifeline = 0 # for i in range(len(question_list)): #", "int(input(\"Enter the correct option \")) # if user_input == 1: # print(\"Congratulation Aapka", "is?\",\"15.how many bytes of 1 KB are equal to?\"] # first_options=[\"1.vedavyas\",\"1.Delhi\",\"1.red\",\"1.purple\",\"1.15\",\"1. 
wannumen\",\"1.ATARIS\",\"1.1949\",\"1.1977\",\"1.garna karnewaala\",\"1.5", "great revalution in the field of computer?\",\"10.what is hindi name of computer?\",\"11.computer literacy", "Alpha\",\"4.non of the above\",\"4.non of the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1] # for", "print \" \" # print \"Congratulation Aap\",win_Rs[i],\"Aap etane rupees jeet chuke hai\" ####kbc2###", "computer?\",\"8.When was the search for a modern computer first?\",\"9.when did the great revalution", "the computer invetor?\",\"7.What was the of the first computer?\",\"8.When was the search for", "a = solution_list[i]-1 # print(question_list[i]) # print(1,options_list[i][a]) # print(2,options_list[i][i]) # user_input = int(input(\"Enter", "the above\",\"4.Wolfrom Alpha\",\"4.non of the above\",\"4.non of the above\"] # win_Rs=[000,1000,2000,3000,5000,10000,20000,40000,80000,160000,320000,640000,1250000,2500000,5000000,10000000] # ans_key=[2,1,1,2,4,3,2,3,4,2,4,1,4,4,1]", "user_input == 1: # print(\"Congratulation Aapka answer sahi hai\") # print() # else:", "Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22", "i in range(len(question_list)): # print(question_list[i]),len(question_list[i]) # print(1,options_list[i][0]) # print(2,options_list[i][1]) # print(3,options_list[i][2]) # print(4,options_list[i][3])", "for a modern computer first?\",\"9.when did the great revalution in the field of", "hai\" # print \" \" # print \" \" # print \"Congratulation Aap\",win_Rs[i],\"Aap", "\"App haar gaaye \" # print \"total_Rs\",win_Rs[i] # break # if i==4: #", "course padhaya jaata hai?\"] # options_list = [[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\",", "####kbc2### # question_list = [\"1.How many continents are there?\", \"2.What is the capital", "months there are in a year?\",\"6.who is the computer invetor?\",\"7.What was the of", "question_list = [\"1.How many continents are there?\", \"2.What is the capital of India?\",", "print fourth_options[i] # user = int(raw_input(\"Enter the correct option \")) # if user", "= [\"1.How many continents are there?\", \"2.What is the capital of India?\", \"3.NG", "year?\",\"6.who is the computer invetor?\",\"7.What was the of the first computer?\",\"8.When was the", "capital of India?\", \"3.NG mei kaun se course padhaya jaata hai?\"] # options_list", "a modern computer first?\",\"9.when did the great revalution in the field of computer?\",\"10.what", "int(raw_input(\"Enter the correct option \")) # if user == ans_key[i]: # print \"App", "print() # else: # print(\"Aap lifelife use kr chuke hai\") # print() #", "Disember\",\"1.Central processing Unit\",\"1.Google\",\"1.mouse\",\"1.1024 byte\"] # second_options=[\"2.valmiki\",\"2.bhopal\",\"2.blue\",\"2.Green\",\"2.6\",\"2.GS kilvi\",\"2.ENIC\",\"2.1951\",\"2.2000\",\"2.sangndak\",\"2.14.Disember\",\"2.Central problem Unit\",\"2.Yahoo\",\"2.key_board\",\"2.1024 Gega byte\"] # third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls", "if lifeline == 0: # 
lifeline+=1 # a = solution_list[i]-1 # print(question_list[i]) #", "print \" \" # print \" \" # print \"Congratulation Aap\",win_Rs[i],\"Aap etane rupees", "\"2.What is the capital of India?\", \"3.NG mei kaun se course padhaya jaata", "second_options[i] # print third_options[i] # print fourth_options[i] # user = int(raw_input(\"Enter the correct", "first?\",\"9.when did the great revalution in the field of computer?\",\"10.what is hindi name", "use kr chuke hai\") # print() # else: # print(\"sadly! Aapka jawab galat", "did the great revalution in the field of computer?\",\"10.what is hindi name of", "option \")) # if user_input == 1: # print(\"Congratulation Aapka answer sahi hai\")", "4, 1] # lifeline = 0 # for i in range(len(question_list)): # print(question_list[i]),len(question_list[i])", "third_options=[\"3.tulsidas\",\"3.jaipur\",\"3.yello\",\"3.white\",\"3.13\",\"3.charls baibej\",\"3.TANDY\",\"3.1946\",\"3.1955\",\"3.hisab karnewaala\",\"3.22 Disember\",\"3.Central processing Union\",\"3.Baidu\",\"3.scanner\",\"3.1024 mega byte\"] # fourth_options=[\"4.non of the above\",\"4.chandigarh\",\"4.black\",\"4.pink\",\"4.12\",\"4.non", "India?\",\"3.what is apples colour?\",\"4.what is tree colour?\",\"5.how many months there are in a", "\"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\", \"Agriculture\"]] # solution_list = [3,", "wrote the mahabharat?\",\"2.what is the capital of India?\",\"3.what is apples colour?\",\"4.what is tree", "lifeline == 0: # lifeline+=1 # a = solution_list[i]-1 # print(question_list[i]) # print(1,options_list[i][a])", "in a year?\",\"6.who is the computer invetor?\",\"7.What was the of the first computer?\",\"8.When", "the of the first computer?\",\"8.When was the search for a modern computer first?\",\"9.when", "print \"win_Rs\",win_Rs[i+1] # else: # print \"App haar gaaye \" # print \"total_Rs\",win_Rs[i]", "if i==4: # print \"congrats! Aapka padaav pura ho gaya hai\" # print", "of the above\",\"4.NOVELLA\",\"4.1947\",\"4.1960\",\"4.parigadak\",\"4.2 Disember\",\"4.non of the above\",\"4.Wolfrom Alpha\",\"4.non of the above\",\"4.non of the", "# print(2,options_list[i][1]) # print(3,options_list[i][2]) # print(4,options_list[i][3]) # user = int(input(\"Enter the correct option", "se course padhaya jaata hai?\"] # options_list = [[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\",", "the search engine?\",\"14.which of the input units is?\",\"15.how many bytes of 1 KB", "rupees jeet chuke hai\" ####kbc2### # question_list = [\"1.How many continents are there?\",", "\"congrats! Aapka padaav pura ho gaya hai\" # print \" \" # print", "# options_list = [[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\", \"Tourism\",", "haar gaaye \" # print \"total_Rs\",win_Rs[i] # break # if i==4: # print", "the correct option \")) # if user_input == 1: # print(\"Congratulation Aapka answer", "hai?\"] # options_list = [[\"Four\", \"Nine\", \"Seven\", \"Eight\"],[\"Chandigarh\", \"Bhopal\", \"Chennai\", \"Delhi\"],[\"Software Engineering\", \"Counseling\",", "print first_options[i] # print second_options[i] # print third_options[i] # print fourth_options[i] # user" ]
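# --- Added sketch (not part of the original repository) ---
# Both quiz drafts above are commented out and mix Python 2 and Python 3
# syntax. A minimal runnable Python 3 version of the second ("kbc2") draft,
# with the 50:50 lifeline reduced to the correct option plus the first wrong
# one, might look like this:

def run_quiz(questions, options, solutions):
    """Ask each question in turn; entering 5050 spends the one-shot 50:50 lifeline."""
    lifeline_used = False
    for q, opts, sol in zip(questions, options, solutions):
        print(q)
        for n, opt in enumerate(opts, start=1):
            print(n, opt)
        answer = int(input("Enter the correct option "))
        if answer == 5050 and not lifeline_used:
            lifeline_used = True
            # keep the correct option and the first wrong one (slot 1 is correct,
            # mirroring the draft's behaviour)
            wrong = next(o for n, o in enumerate(opts, start=1) if n != sol)
            print(q)
            print(1, opts[sol - 1])
            print(2, wrong)
            answer = sol if int(input("Enter the correct option ")) == 1 else -1
        if answer == sol:
            print("congrats! Your answer is correct\n")
        else:
            print("sadly! Your answer is wrong")
            break

# Example run with the kbc2 data:
# run_quiz(["1.How many continents are there?"], [["Four", "Nine", "Seven", "Eight"]], [3])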
[ "__version__ = 1.0 from .em import * from .functions_em import * from .py3", "* from .functions_em import * from .py3 import * from .rotation import *", "1.0 from .em import * from .functions_em import * from .py3 import *", "<filename>clumpy/__init__.py __version__ = 1.0 from .em import * from .functions_em import * from", ".em import * from .functions_em import * from .py3 import * from .rotation", "import * from .functions_em import * from .py3 import * from .rotation import", "from .em import * from .functions_em import * from .py3 import * from", "= 1.0 from .em import * from .functions_em import * from .py3 import" ]
[ "def build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\" \"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header =", ":qshape[1]], dataccd[:qshape[0], qshape[1]:] ] else: raise ValueError(f\"qid must be 1->4 {qid} given\") return", "cls.from_filenames(filenames, use_dask=use_dask, **kwargs) # ============= # # Methods # # ============= # def", "Parameters ---------- qid: [int or None/'*'] which quadrant you want ? - int:", "== \"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif groupby == \"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else:", "data. Parameters ---------- qid: [int or None/'*'] which quadrant you want ? -", "# BUILDER # # -------- # def build(self, corr_nl=True, corr_overscan=True, clipping=True, set_it=False, inclheader=True,", "get_filepath filename = get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid) return cls.from_filename(filename, use_dask=use_dask, **kwargs) @classmethod def", "..metadata import get_rawmeta from ..io import get_filepath meta = get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid,", "get_quadrant_data().\") # ==================== # # # # Flat Builder # # # #", "data, header = bflat.build(set_it=False, **kwargs) return cls(data, header=None, use_dask=True) # ============== # #", "= flatfilenames return this @classmethod def from_date(cls, date, ledid, use_dask=True, **kwargs): \"\"\" \"\"\"", "int(qid) dataccd = self.get_data(**kwargs) # this accounts for all rotation and rebin did", "qid, **kwargs): \"\"\" **kwargs goes to get_data() this then split the data. Parameters", "dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile) else: data = fits.getdata(fitsfile) header= fits.getheader(fitsfile) this =", "header = self.imgcollection.get_singleheader(refid, as_serie=True) if type(header) == dask.dataframe.core.Series: header = header.compute() header =", "implemented (read_fits) ; {filename} given\") @classmethod def from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs):", "ccdid=None, ledid=None, groupby=\"day\"): \"\"\" \"\"\" # IRSA metadata from ..metadata import get_rawmeta from", "== 2: data_ = dataccd[qshape[0]:, :qshape[1]] elif qid == 3: data_ = dataccd[:qshape[0],", "from ztfquery import io outs = [] for i_, s_ in build_dataframe.iterrows(): #", "**kwargs) def get_quadrant(self, *args, **kwargs): \"\"\" \"\"\" raise NotImplemented(\"get_quadrant() is not usable as", "numpy as np import dask import dask.array as da import warnings from astropy.io", "def ledid_to_filtername(ledid): \"\"\" \"\"\" for f_,v_ in LED_FILTER.items(): if int(ledid) in v_: return", "library to build the ztfin2p3 pipeline screen flats \"\"\" import os import numpy", "build_dataframe.iterrows(): # fileout = s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed files =", "in flatfilenames: ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames", "f_,v_ in LED_FILTER.items(): if int(ledid) in v_: return f_ raise ValueError(f\"Unknown led with", "fits.getheader(fitsfile) this = cls(data=data, header=header, use_dask=use_dask) this._filename = fitsfile return this @classmethod def", "ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for id_, s_ in datapath.iterrows()] return datapath def build_from_datapath(build_dataframe, assume_exist=False,", "data_ class FlatFocalPlane( 
FocalPlane ): @classmethod def from_filenames(cls, flatfilenames, use_dask=True, **kwargs): \"\"\" \"\"\"", "return datapath def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs): \"\"\" \"\"\" if not assume_exist:", "all quadrant return as list [1,2,3,4] **kwargs goes to get_data() Returns ------- ndarray", "this accounts for all rotation and rebin did before qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\")", "\"\"\" # IRSA metadata from ..metadata import get_rawmeta from ..io import get_filepath meta", "for file_ in flatfilenames: ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_,", "date, ledid, ccdid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath filename =", "None: self.set_header(header) # ============== # # I/O # # ============== # @classmethod def", "import numpy as np import dask import dask.array as da import warnings from", "from ..io import get_filepath meta = get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True) #", "this @classmethod def build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\" \"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False) data,", "4: data_ = dataccd[:qshape[0], qshape[1]:] elif qid is None or qid in [\"*\",\"all\"]:", "dtype=\"int\") if qid == 1: data_ = dataccd[qshape[0]:, qshape[1]:] elif qid == 2:", "cls.read_fits(filename, use_dask=use_dask) else: raise NotImplementedError(f\"Only fits file loader implemented (read_fits) ; {filename} given\")", "\"\"\" this = cls(use_dask=use_dask) for file_ in flatfilenames: ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs)", "dataccd[:qshape[0], qshape[1]:] ] else: raise ValueError(f\"qid must be 1->4 {qid} given\") return data_", "meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only groupby day or month implemented: {groupby} given\") datapath =", "'*'/'all': all quadrant return as list [1,2,3,4] **kwargs goes to get_data() Returns -------", "qid is None or qid in [\"*\",\"all\"]: data_ = [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]],", "this run ztfquery.io.get_file() ? \"\"\" from ztfquery import io basename = os.path.basename(filename) if", "return cls(data, header=None, use_dask=True) # ============== # # Method # # ============== #", "not None: self.set_header(header) # ============== # # I/O # # ============== # @classmethod", "newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. 
input images\") if inclinput: basenames = self.imgcollection.filenames for i, basename_ in", "rebin did before qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid == 1: data_ =", "corr_overscan=True, clipping=True, set_it=False, inclheader=True, **kwargs): \"\"\" \"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader,", "metadata from ..metadata import get_rawmeta from ..io import get_filepath meta = get_rawmeta(\"flat\", date,", "# ==================== # from .builder import CalibrationBuilder class FlatBuilder( CalibrationBuilder ): # --------", "datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for id_, s_ in datapath.iterrows()] return", "def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs): \"\"\" \"\"\" if not assume_exist: from ztfquery", "@classmethod def from_date(cls, date, ledid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath", "are CCD-base. See get_quadrant_data().\") # ==================== # # # # Flat Builder #", "meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for id_,", "LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], } def ledid_to_filtername(ledid): \"\"\" \"\"\" for f_,v_ in", "header is not None: self.set_header(header) # ============== # # I/O # # ==============", "to get_data() Returns ------- ndarray (numpy or dask) \"\"\" if qid in [\"*\",\"all\"]:", "LED_FILTER.items(): if int(ledid) in v_: return f_ raise ValueError(f\"Unknown led with ID {ledid}\")", "6160, 6144 QUADRANT_SHAPE = 3080, 3072 def __init__(self, data, header=None, use_dask=True): \"\"\" \"\"\"", "groupby=\"day\"): \"\"\" \"\"\" # IRSA metadata from ..metadata import get_rawmeta from ..io import", "from ztfimg.base import _Image_, FocalPlane LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], } def ledid_to_filtername(ledid):", "ztfquery import io basename = os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename) if", "== \"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only groupby day or month implemented:", "use_dask=use_dask) this._filename = fitsfile return this @classmethod def build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\" \"\"\"", "f_ raise ValueError(f\"Unknown led with ID {ledid}\") def get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"): \"\"\"", "4 - None or '*'/'all': all quadrant return as list [1,2,3,4] **kwargs goes", "header = header.compute() header = header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. 
input images\")", "use_dask=use_dask, **kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames = flatfilenames return this @classmethod", "{\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], } def ledid_to_filtername(ledid): \"\"\" \"\"\" for f_,v_ in LED_FILTER.items(): if", "# this accounts for all rotation and rebin did before qshape = np.asarray(np.asarray(dataccd.shape)/2,", "ccdid, qid = self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self, *args, **kwargs): \"\"\" \"\"\"", "dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile) else: data = fits.getdata(fitsfile) header= fits.getheader(fitsfile) this = cls(data=data, header=header,", "ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames = flatfilenames return this @classmethod def from_date(cls,", "in datapath.iterrows()] return datapath def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs): \"\"\" \"\"\" if", "): SHAPE = 6160, 6144 QUADRANT_SHAPE = 3080, 3072 def __init__(self, data, header=None,", "groupby == \"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif groupby == \"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6]", "= header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. input images\") if inclinput: basenames =", "s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed files = s_[\"filepath\"] if not assume_exist:", "basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename) if \".fits\" in basename: return cls.read_fits(filename, use_dask=use_dask) else: raise", "file loader implemented (read_fits) ; {filename} given\") @classmethod def from_date(cls, date, ledid, ccdid,", "flats \"\"\" import os import numpy as np import dask import dask.array as", "or qid in [\"*\",\"all\"]: data_ = [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0],", "which quadrant you want ? 
- int: 1,2,3 or 4 - None or", "# def build(self, corr_nl=True, corr_overscan=True, clipping=True, set_it=False, inclheader=True, **kwargs): \"\"\" \"\"\" return super().build(corr_nl=corr_nl,", "def build(self, corr_nl=True, corr_overscan=True, clipping=True, set_it=False, inclheader=True, **kwargs): \"\"\" \"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan,", "dataccd[qshape[0]:, qshape[1]:] elif qid == 2: data_ = dataccd[qshape[0]:, :qshape[1]] elif qid ==", "if int(ledid) in v_: return f_ raise ValueError(f\"Unknown led with ID {ledid}\") def", "date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True) # Parsing out what to do: if groupby", "============== # @classmethod def from_filename(cls, filename, use_dask=True, assume_exist=True): \"\"\" loads the object given", "[\"*\",\"all\"]: qid = None if qid is not None: qid = int(qid) dataccd", "fitsfile return this @classmethod def build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\" \"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles,", "\"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid, as_serie=True) if type(header) == dask.dataframe.core.Series: header = header.compute()", "== 3: data_ = dataccd[:qshape[0], :qshape[1]] elif qid == 4: data_ = dataccd[:qshape[0],", "BUILDER # # -------- # def build(self, corr_nl=True, corr_overscan=True, clipping=True, set_it=False, inclheader=True, **kwargs):", "= FlatBuilder.from_rawfiles(files, persist=False) data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs) output = dask.delayed(fits.writeto)(fileout, data,", ":qshape[1]] elif qid == 3: data_ = dataccd[:qshape[0], :qshape[1]] elif qid == 4:", "# Methods # # ============= # def get_quadrant_data(self, rcid, **kwargs): \"\"\" \"\"\" ccdid,", "inclinput: basenames = self.imgcollection.filenames for i, basename_ in enumerate(basenames): newheader.set(f\"INPUT{i:02d}\",basename_, \"input image\") return", "== 1: data_ = dataccd[qshape[0]:, qshape[1]:] elif qid == 2: data_ = dataccd[qshape[0]:,", "as np import dask import dask.array as da import warnings from astropy.io import", "= fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. input images\") if inclinput: basenames = self.imgcollection.filenames for i,", "\"\"\" from ..io import get_filepath filename = get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid) return cls.from_filename(filename,", "# fileout = s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed files = s_[\"filepath\"]", "= None if qid is not None: qid = int(qid) dataccd = self.get_data(**kwargs)", "<filename>ztfin2p3/calibration/flat.py \"\"\" library to build the ztfin2p3 pipeline screen flats \"\"\" import os", "is not None: qid = int(qid) dataccd = self.get_data(**kwargs) # this accounts for", "then split the data. 
Parameters ---------- qid: [int or None/'*'] which quadrant you", "meta = get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True) # Parsing out what to", "else: raise ValueError(f\"qid must be 1->4 {qid} given\") return data_ class FlatFocalPlane( FocalPlane", "ccdid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath filename = get_filepath(\"flat\", date,", "elif qid == 2: data_ = dataccd[qshape[0]:, :qshape[1]] elif qid == 3: data_", "inclheader=False, overwrite=True, **kwargs): \"\"\" \"\"\" if not assume_exist: from ztfquery import io outs", "dataccd = self.get_data(**kwargs) # this accounts for all rotation and rebin did before", "# # # # ==================== # from .builder import CalibrationBuilder class FlatBuilder( CalibrationBuilder", "get_filepath meta = get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True) # Parsing out what", "ztfin2p3 pipeline screen flats \"\"\" import os import numpy as np import dask", "exist_ok=True) # build if needed files = s_[\"filepath\"] if not assume_exist: files =", "inclheader=inclheader, **kwargs) output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite) outs.append(output) return outs class Flat(", "# ============== # # Method # # ============== # def get_quadrant_data(self, qid, **kwargs):", "= [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:] ] else: raise ValueError(f\"qid", "\"\"\" \"\"\" from astropy.io import fits if keys is None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\",", "\"\"\" \"\"\" if not assume_exist: from ztfquery import io outs = [] for", "dataccd[qshape[0]:, :qshape[1]] elif qid == 3: data_ = dataccd[:qshape[0], :qshape[1]] elif qid ==", "astropy.io import fits from ztfimg.base import _Image_, FocalPlane LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13],", "as list [1,2,3,4] **kwargs goes to get_data() Returns ------- ndarray (numpy or dask)", "\"\"\" library to build the ztfin2p3 pipeline screen flats \"\"\" import os import", "= fits.getdata(fitsfile) header= fits.getheader(fitsfile) this = cls(data=data, header=header, use_dask=use_dask) this._filename = fitsfile return", "\"\"\" \"\"\" from ..io import get_filepath filename = get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid) return", "@classmethod def build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\" \"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header", "use_dask=True) # ============== # # Method # # ============== # def get_quadrant_data(self, qid,", "want ? - int: 1,2,3 or 4 - None or '*'/'all': all quadrant", "data_ = dataccd[qshape[0]:, :qshape[1]] elif qid == 3: data_ = dataccd[:qshape[0], :qshape[1]] elif", "fileout = s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed files = s_[\"filepath\"] if", "raise ValueError(f\"Only groupby day or month implemented: {groupby} given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"]", "the data. Parameters ---------- qid: [int or None/'*'] which quadrant you want ?", "overwrite=overwrite) outs.append(output) return outs class Flat( _Image_ ): SHAPE = 6160, 6144 QUADRANT_SHAPE", "**kwargs): \"\"\" **kwargs goes to get_data() this then split the data. 
Parameters ----------", "for id_, s_ in datapath.iterrows()] return datapath def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs):", "class Flat( _Image_ ): SHAPE = 6160, 6144 QUADRANT_SHAPE = 3080, 3072 def", "out what to do: if groupby == \"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif groupby", "do: if groupby == \"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif groupby == \"month\": meta[groupby]", "_ = super().__init__(use_dask=use_dask) self.set_data(data) if header is not None: self.set_header(header) # ============== #", "class FlatBuilder( CalibrationBuilder ): # -------- # # BUILDER # # -------- #", "\"\"\" \"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs) def build_header(self, keys=None, refid=0,", "] else: raise ValueError(f\"qid must be 1->4 {qid} given\") return data_ class FlatFocalPlane(", "**kwargs): \"\"\" \"\"\" raise NotImplemented(\"get_quadrant() is not usable as flat are CCD-base. See", "is not usable as flat are CCD-base. See get_quadrant_data().\") # ==================== # #", "header= dask.delayed(fits.getheader)(fitsfile) else: data = fits.getdata(fitsfile) header= fits.getheader(fitsfile) this = cls(data=data, header=header, use_dask=use_dask)", "be 1->4 {qid} given\") return data_ class FlatFocalPlane( FocalPlane ): @classmethod def from_filenames(cls,", "Parameters ---------- assume_exist: [bool] Shall this run ztfquery.io.get_file() ? \"\"\" from ztfquery import", "qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid == 1: data_ = dataccd[qshape[0]:, qshape[1]:] elif", "= bflat.build(set_it=False, **kwargs) return cls(data, header=None, use_dask=True) # ============== # # Method #", "qid == 4: data_ = dataccd[:qshape[0], qshape[1]:] elif qid is None or qid", "_Image_, FocalPlane LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], } def ledid_to_filtername(ledid): \"\"\" \"\"\" for", "all rotation and rebin did before qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid ==", "output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite) outs.append(output) return outs class Flat( _Image_ ):", "Flat( _Image_ ): SHAPE = 6160, 6144 QUADRANT_SHAPE = 3080, 3072 def __init__(self,", "\"\"\" loads the object given the input file. Parameters ---------- assume_exist: [bool] Shall", "_Image_ ): SHAPE = 6160, 6144 QUADRANT_SHAPE = 3080, 3072 def __init__(self, data,", "= {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], } def ledid_to_filtername(ledid): \"\"\" \"\"\" for f_,v_ in LED_FILTER.items():", "cls.from_filename(filename, use_dask=use_dask, **kwargs) @classmethod def read_fits(cls, fitsfile, use_dask=True): \"\"\" \"\"\" if use_dask: data", "header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. input images\") if inclinput: basenames = self.imgcollection.filenames", "filename, use_dask=True, assume_exist=True): \"\"\" loads the object given the input file. 
Parameters ----------", "ValueError(f\"Only groupby day or month implemented: {groupby} given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] =", "= dataccd[qshape[0]:, qshape[1]:] elif qid == 2: data_ = dataccd[qshape[0]:, :qshape[1]] elif qid", "# ==================== # # # # Flat Builder # # # # ====================", "[int or None/'*'] which quadrant you want ? - int: 1,2,3 or 4", "super().__init__(use_dask=use_dask) self.set_data(data) if header is not None: self.set_header(header) # ============== # # I/O", "header= fits.getheader(fitsfile) this = cls(data=data, header=header, use_dask=use_dask) this._filename = fitsfile return this @classmethod", "[bool] Shall this run ztfquery.io.get_file() ? \"\"\" from ztfquery import io basename =", "= int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames = flatfilenames return this @classmethod def from_date(cls, date,", "or '*'/'all': all quadrant return as list [1,2,3,4] **kwargs goes to get_data() Returns", "ledid, ccdid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath filename = get_filepath(\"flat\",", "if keys is None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"]", "import fits if keys is None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\",", "\".fits\" in basename: return cls.read_fits(filename, use_dask=use_dask) else: raise NotImplementedError(f\"Only fits file loader implemented", "IRSA metadata from ..metadata import get_rawmeta from ..io import get_filepath meta = get_rawmeta(\"flat\",", "FocalPlane LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], } def ledid_to_filtername(ledid): \"\"\" \"\"\" for f_,v_", "\"\"\" \"\"\" _ = super().__init__(use_dask=use_dask) self.set_data(data) if header is not None: self.set_header(header) #", ".builder import CalibrationBuilder class FlatBuilder( CalibrationBuilder ): # -------- # # BUILDER #", "= FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header = bflat.build(set_it=False, **kwargs) return cls(data, header=None, use_dask=True) #", "datapath.iterrows()] return datapath def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs): \"\"\" \"\"\" if not", "build the ztfin2p3 pipeline screen flats \"\"\" import os import numpy as np", "use_dask=True, **kwargs): \"\"\" \"\"\" this = cls(use_dask=use_dask) for file_ in flatfilenames: ccd_ =", "as flat are CCD-base. 
See get_quadrant_data().\") # ==================== # # # # Flat", "with ID {ledid}\") def get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"): \"\"\" \"\"\" # IRSA metadata", "fitsfile, use_dask=True): \"\"\" \"\"\" if use_dask: data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header=", "in build_dataframe.iterrows(): # fileout = s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed files", "if needed files = s_[\"filepath\"] if not assume_exist: files = io.bulk_get_file(files) # bflat", "# ============= # # Methods # # ============= # def get_quadrant_data(self, rcid, **kwargs):", "3: data_ = dataccd[:qshape[0], :qshape[1]] elif qid == 4: data_ = dataccd[:qshape[0], qshape[1]:]", "): @classmethod def from_filenames(cls, flatfilenames, use_dask=True, **kwargs): \"\"\" \"\"\" this = cls(use_dask=use_dask) for", "raise NotImplemented(\"get_quadrant() is not usable as flat are CCD-base. See get_quadrant_data().\") # ====================", "this._filename = fitsfile return this @classmethod def build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\" \"\"\" bflat", "@classmethod def from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import", "ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True) # Parsing out what to do: if groupby ==", "if type(header) == dask.dataframe.core.Series: header = header.compute() header = header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict())", "= self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self, *args, **kwargs): \"\"\" \"\"\" raise NotImplemented(\"get_quadrant()", "getwhat=\"filepath\", in_meta=True) # Parsing out what to do: if groupby == \"day\": meta[groupby]", "if header is not None: self.set_header(header) # ============== # # I/O # #", "filename = io.get_file(filename) if \".fits\" in basename: return cls.read_fits(filename, use_dask=use_dask) else: raise NotImplementedError(f\"Only", "groupby day or month implemented: {groupby} given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername)", "**kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames = flatfilenames return this @classmethod def", "NotImplementedError(f\"Only fits file loader implemented (read_fits) ; {filename} given\") @classmethod def from_date(cls, date,", "==================== # # # # Flat Builder # # # # ==================== #", "refid=0, inclinput=False): \"\"\" \"\"\" from astropy.io import fits if keys is None: keys", "in [\"*\",\"all\"]: data_ = [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:] ]", "Method # # ============== # def get_quadrant_data(self, qid, **kwargs): \"\"\" **kwargs goes to", "the object given the input file. 
Parameters ---------- assume_exist: [bool] Shall this run", "basename = os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename) if \".fits\" in basename:", "qid = int(qid) dataccd = self.get_data(**kwargs) # this accounts for all rotation and", "# # ============= # def get_quadrant_data(self, rcid, **kwargs): \"\"\" \"\"\" ccdid, qid =", "files = io.bulk_get_file(files) # bflat = FlatBuilder.from_rawfiles(files, persist=False) data, header = bflat.build(set_it=False, inclheader=inclheader,", "import get_filepath ccdids = np.arange(1,17) filenames = [get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid) for ccdid_", "outs class Flat( _Image_ ): SHAPE = 6160, 6144 QUADRANT_SHAPE = 3080, 3072", "rcid, **kwargs): \"\"\" \"\"\" ccdid, qid = self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self,", "SHAPE = 6160, 6144 QUADRANT_SHAPE = 3080, 3072 def __init__(self, data, header=None, use_dask=True):", "# bflat = FlatBuilder.from_rawfiles(files, persist=False) data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs) output =", "ledid=None, groupby=\"day\"): \"\"\" \"\"\" # IRSA metadata from ..metadata import get_rawmeta from ..io", "\"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid, as_serie=True) if type(header) == dask.dataframe.core.Series:", "): # -------- # # BUILDER # # -------- # def build(self, corr_nl=True,", "images\") if inclinput: basenames = self.imgcollection.filenames for i, basename_ in enumerate(basenames): newheader.set(f\"INPUT{i:02d}\",basename_, \"input", "-------- # # BUILDER # # -------- # def build(self, corr_nl=True, corr_overscan=True, clipping=True,", "# ============= # def get_quadrant_data(self, rcid, **kwargs): \"\"\" \"\"\" ccdid, qid = self.rcid_to_ccdid_qid(rcid)", "corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs) def build_header(self, keys=None, refid=0, inclinput=False): \"\"\" \"\"\" from", "3080, 3072 def __init__(self, data, header=None, use_dask=True): \"\"\" \"\"\" _ = super().__init__(use_dask=use_dask) self.set_data(data)", "= dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite) outs.append(output) return outs class Flat( _Image_ ): SHAPE", "= 3080, 3072 def __init__(self, data, header=None, use_dask=True): \"\"\" \"\"\" _ = super().__init__(use_dask=use_dask)", "? - int: 1,2,3 or 4 - None or '*'/'all': all quadrant return", "} def ledid_to_filtername(ledid): \"\"\" \"\"\" for f_,v_ in LED_FILTER.items(): if int(ledid) in v_:", "screen flats \"\"\" import os import numpy as np import dask import dask.array", "import CalibrationBuilder class FlatBuilder( CalibrationBuilder ): # -------- # # BUILDER # #", "None/'*'] which quadrant you want ? - int: 1,2,3 or 4 - None", "fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. input images\") if inclinput: basenames = self.imgcollection.filenames for i, basename_", "import dask import dask.array as da import warnings from astropy.io import fits from", "---------- assume_exist: [bool] Shall this run ztfquery.io.get_file() ? 
\"\"\" from ztfquery import io", "1: data_ = dataccd[qshape[0]:, qshape[1]:] elif qid == 2: data_ = dataccd[qshape[0]:, :qshape[1]]", "get_quadrant_data(self, qid, **kwargs): \"\"\" **kwargs goes to get_data() this then split the data.", "None: qid = int(qid) dataccd = self.get_data(**kwargs) # this accounts for all rotation", "and rebin did before qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid == 1: data_", "did before qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid == 1: data_ = dataccd[qshape[0]:,", "= dataccd[qshape[0]:, :qshape[1]] elif qid == 3: data_ = dataccd[:qshape[0], :qshape[1]] elif qid", "ccdid_ in ccdids] return cls.from_filenames(filenames, use_dask=use_dask, **kwargs) # ============= # # Methods #", "= cls(use_dask=use_dask) for file_ in flatfilenames: ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid =", "ztfimg.base import _Image_, FocalPlane LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], } def ledid_to_filtername(ledid): \"\"\"", "None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid,", "# build if needed files = s_[\"filepath\"] if not assume_exist: files = io.bulk_get_file(files)", "qid == 3: data_ = dataccd[:qshape[0], :qshape[1]] elif qid == 4: data_ =", "flat are CCD-base. See get_quadrant_data().\") # ==================== # # # # Flat Builder", "CalibrationBuilder ): # -------- # # BUILDER # # -------- # def build(self,", "this._filenames = flatfilenames return this @classmethod def from_date(cls, date, ledid, use_dask=True, **kwargs): \"\"\"", "= int(qid) dataccd = self.get_data(**kwargs) # this accounts for all rotation and rebin", "NotImplemented(\"get_quadrant() is not usable as flat are CCD-base. See get_quadrant_data().\") # ==================== #", "# ============== # @classmethod def from_filename(cls, filename, use_dask=True, assume_exist=True): \"\"\" loads the object", "data, header=None, use_dask=True): \"\"\" \"\"\" _ = super().__init__(use_dask=use_dask) self.set_data(data) if header is not", "= da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile) else: data = fits.getdata(fitsfile) header= fits.getheader(fitsfile)", "**kwargs): \"\"\" \"\"\" this = cls(use_dask=use_dask) for file_ in flatfilenames: ccd_ = Flat.from_filename(file_,", "this @classmethod def from_date(cls, date, ledid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import", "clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs) def build_header(self, keys=None, refid=0, inclinput=False): \"\"\" \"\"\" from astropy.io", "raise NotImplementedError(f\"Only fits file loader implemented (read_fits) ; {filename} given\") @classmethod def from_date(cls,", "None if qid is not None: qid = int(qid) dataccd = self.get_data(**kwargs) #", "from_filename(cls, filename, use_dask=True, assume_exist=True): \"\"\" loads the object given the input file. 
Parameters", "given\") return data_ class FlatFocalPlane( FocalPlane ): @classmethod def from_filenames(cls, flatfilenames, use_dask=True, **kwargs):", "See get_quadrant_data().\") # ==================== # # # # Flat Builder # # #", "basename: return cls.read_fits(filename, use_dask=use_dask) else: raise NotImplementedError(f\"Only fits file loader implemented (read_fits) ;", "if not basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename) if \".fits\" in basename: return cls.read_fits(filename, use_dask=use_dask)", "FocalPlane ): @classmethod def from_filenames(cls, flatfilenames, use_dask=True, **kwargs): \"\"\" \"\"\" this = cls(use_dask=use_dask)", "get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True) # Parsing out what to do: if", "**kwargs): \"\"\" \"\"\" if not assume_exist: from ztfquery import io outs = []", "quadrant you want ? - int: 1,2,3 or 4 - None or '*'/'all':", "ztfquery.io.get_file() ? \"\"\" from ztfquery import io basename = os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"):", "dask.delayed(fits.getheader)(fitsfile) else: data = fits.getdata(fitsfile) header= fits.getheader(fitsfile) this = cls(data=data, header=header, use_dask=use_dask) this._filename", "np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid == 1: data_ = dataccd[qshape[0]:, qshape[1]:] elif qid ==", "------- ndarray (numpy or dask) \"\"\" if qid in [\"*\",\"all\"]: qid = None", "= super().__init__(use_dask=use_dask) self.set_data(data) if header is not None: self.set_header(header) # ============== # #", "filtername=s_.filtername) for id_, s_ in datapath.iterrows()] return datapath def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True,", "datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername)", "needed files = s_[\"filepath\"] if not assume_exist: files = io.bulk_get_file(files) # bflat =", "given\") @classmethod def from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io", "dask.array as da import warnings from astropy.io import fits from ztfimg.base import _Image_,", "file. Parameters ---------- assume_exist: [bool] Shall this run ztfquery.io.get_file() ? \"\"\" from ztfquery", "keys is None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header", "# -------- # def build(self, corr_nl=True, corr_overscan=True, clipping=True, set_it=False, inclheader=True, **kwargs): \"\"\" \"\"\"", "============== # # Method # # ============== # def get_quadrant_data(self, qid, **kwargs): \"\"\"", "header.compute() header = header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. 
input images\") if inclinput:", "for f_,v_ in LED_FILTER.items(): if int(ledid) in v_: return f_ raise ValueError(f\"Unknown led", "def get_quadrant(self, *args, **kwargs): \"\"\" \"\"\" raise NotImplemented(\"get_quadrant() is not usable as flat", "# def get_quadrant_data(self, rcid, **kwargs): \"\"\" \"\"\" ccdid, qid = self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid,", "**kwargs) @classmethod def read_fits(cls, fitsfile, use_dask=True): \"\"\" \"\"\" if use_dask: data = da.from_delayed(", "s_ in datapath.iterrows()] return datapath def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs): \"\"\" \"\"\"", "shape=cls.SHAPE, dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile) else: data = fits.getdata(fitsfile) header= fits.getheader(fitsfile) this = cls(data=data,", "def get_quadrant_data(self, qid, **kwargs): \"\"\" **kwargs goes to get_data() this then split the", "= fitsfile return this @classmethod def build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\" \"\"\" bflat =", "split the data. Parameters ---------- qid: [int or None/'*'] which quadrant you want", "import _Image_, FocalPlane LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], } def ledid_to_filtername(ledid): \"\"\" \"\"\"", "[1,2,3,4] **kwargs goes to get_data() Returns ------- ndarray (numpy or dask) \"\"\" if", "= cls(data=data, header=header, use_dask=use_dask) this._filename = fitsfile return this @classmethod def build_from_rawfiles(cls, rawfiles,", "data_ = dataccd[qshape[0]:, qshape[1]:] elif qid == 2: data_ = dataccd[qshape[0]:, :qshape[1]] elif", "= s_[\"filepath\"] if not assume_exist: files = io.bulk_get_file(files) # bflat = FlatBuilder.from_rawfiles(files, persist=False)", "dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite) outs.append(output) return outs class Flat( _Image_ ): SHAPE =", "bflat = FlatBuilder.from_rawfiles(files, persist=False) data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs) output = dask.delayed(fits.writeto)(fileout,", "qshape[1]:] elif qid == 2: data_ = dataccd[qshape[0]:, :qshape[1]] elif qid == 3:", "= datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for id_, s_ in", "use_dask=True): \"\"\" \"\"\" if use_dask: data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile)", "ccdids = np.arange(1,17) filenames = [get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid) for ccdid_ in ccdids]", "assume_exist=False, inclheader=False, overwrite=True, **kwargs): \"\"\" \"\"\" if not assume_exist: from ztfquery import io", "# ============== # def get_quadrant_data(self, qid, **kwargs): \"\"\" **kwargs goes to get_data() this", "build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs): \"\"\" \"\"\" if not assume_exist: from ztfquery import", "elif qid is None or qid in [\"*\",\"all\"]: data_ = [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:,", "loads the object given the input file. 
Parameters ---------- assume_exist: [bool] Shall this", "==================== # from .builder import CalibrationBuilder class FlatBuilder( CalibrationBuilder ): # -------- #", "ndarray (numpy or dask) \"\"\" if qid in [\"*\",\"all\"]: qid = None if", "if not assume_exist: files = io.bulk_get_file(files) # bflat = FlatBuilder.from_rawfiles(files, persist=False) data, header", "# # BUILDER # # -------- # def build(self, corr_nl=True, corr_overscan=True, clipping=True, set_it=False,", "for ccdid_ in ccdids] return cls.from_filenames(filenames, use_dask=use_dask, **kwargs) # ============= # # Methods", "not usable as flat are CCD-base. See get_quadrant_data().\") # ==================== # # #", "filename = get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid) return cls.from_filename(filename, use_dask=use_dask, **kwargs) @classmethod def read_fits(cls,", "= [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid, as_serie=True) if", "s_ in build_dataframe.iterrows(): # fileout = s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed", "int: 1,2,3 or 4 - None or '*'/'all': all quadrant return as list", "# IRSA metadata from ..metadata import get_rawmeta from ..io import get_filepath meta =", "data_ = [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:] ] else: raise", "\"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header = bflat.build(set_it=False, **kwargs) return cls(data, header=None,", "outs.append(output) return outs class Flat( _Image_ ): SHAPE = 6160, 6144 QUADRANT_SHAPE =", "dask) \"\"\" if qid in [\"*\",\"all\"]: qid = None if qid is not", "must be 1->4 {qid} given\") return data_ class FlatFocalPlane( FocalPlane ): @classmethod def", "get_quadrant(self, *args, **kwargs): \"\"\" \"\"\" raise NotImplemented(\"get_quadrant() is not usable as flat are", "v_: return f_ raise ValueError(f\"Unknown led with ID {ledid}\") def get_build_datapath(date, ccdid=None, ledid=None,", "qid is not None: qid = int(qid) dataccd = self.get_data(**kwargs) # this accounts", "int(ledid) in v_: return f_ raise ValueError(f\"Unknown led with ID {ledid}\") def get_build_datapath(date,", "Shall this run ztfquery.io.get_file() ? 
\"\"\" from ztfquery import io basename = os.path.basename(filename)", "= meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for", "self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self, *args, **kwargs): \"\"\" \"\"\" raise NotImplemented(\"get_quadrant() is not usable", "# # Methods # # ============= # def get_quadrant_data(self, rcid, **kwargs): \"\"\" \"\"\"", "{ledid}\") def get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"): \"\"\" \"\"\" # IRSA metadata from ..metadata", "# # Method # # ============== # def get_quadrant_data(self, qid, **kwargs): \"\"\" **kwargs", "import get_filepath filename = get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid) return cls.from_filename(filename, use_dask=use_dask, **kwargs) @classmethod", "if \".fits\" in basename: return cls.read_fits(filename, use_dask=use_dask) else: raise NotImplementedError(f\"Only fits file loader", "build(self, corr_nl=True, corr_overscan=True, clipping=True, set_it=False, inclheader=True, **kwargs): \"\"\" \"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping,", "\"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid, as_serie=True) if type(header) == dask.dataframe.core.Series: header", "- None or '*'/'all': all quadrant return as list [1,2,3,4] **kwargs goes to", "in_meta=True) # Parsing out what to do: if groupby == \"day\": meta[groupby] =", "what to do: if groupby == \"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif groupby ==", "meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only groupby day or month implemented: {groupby} given\")", "\"\"\" for f_,v_ in LED_FILTER.items(): if int(ledid) in v_: return f_ raise ValueError(f\"Unknown", "meta.filefracday.astype(\"str\").str[:8] elif groupby == \"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only groupby day", "loader implemented (read_fits) ; {filename} given\") @classmethod def from_date(cls, date, ledid, ccdid, use_dask=True,", "else: raise ValueError(f\"Only groupby day or month implemented: {groupby} given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index()", "qid: [int or None/'*'] which quadrant you want ? 
- int: 1,2,3 or", "[] for i_, s_ in build_dataframe.iterrows(): # fileout = s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) #", "or 4 - None or '*'/'all': all quadrant return as list [1,2,3,4] **kwargs", "def read_fits(cls, fitsfile, use_dask=True): \"\"\" \"\"\" if use_dask: data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE,", "os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename) if \".fits\" in basename: return cls.read_fits(filename,", "\"\"\" \"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header = bflat.build(set_it=False, **kwargs) return cls(data,", "use_dask=True): \"\"\" \"\"\" _ = super().__init__(use_dask=use_dask) self.set_data(data) if header is not None: self.set_header(header)", "return cls.from_filename(filename, use_dask=use_dask, **kwargs) @classmethod def read_fits(cls, fitsfile, use_dask=True): \"\"\" \"\"\" if use_dask:", "build_header(self, keys=None, refid=0, inclinput=False): \"\"\" \"\"\" from astropy.io import fits if keys is", "data_ = dataccd[:qshape[0], :qshape[1]] elif qid == 4: data_ = dataccd[:qshape[0], qshape[1]:] elif", "? \"\"\" from ztfquery import io basename = os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"): filename", "warnings from astropy.io import fits from ztfimg.base import _Image_, FocalPlane LED_FILTER = {\"zg\":[2,3,4,5],", "use_dask=use_dask, **kwargs) # ============= # # Methods # # ============= # def get_quadrant_data(self,", "FlatBuilder.from_rawfiles(files, persist=False) data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs) output = dask.delayed(fits.writeto)(fileout, data, header=header,", "I/O # # ============== # @classmethod def from_filename(cls, filename, use_dask=True, assume_exist=True): \"\"\" loads", "newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. 
input images\") if inclinput: basenames = self.imgcollection.filenames for", "datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for id_, s_ in datapath.iterrows()]", "\"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only groupby day or month implemented: {groupby}", "this.set_ccd(ccd_, ccdid=ccdid) this._filenames = flatfilenames return this @classmethod def from_date(cls, date, ledid, use_dask=True,", "dataccd[:qshape[0], qshape[1]:] elif qid is None or qid in [\"*\",\"all\"]: data_ = [dataccd[qshape[0]:,", "get_quadrant_data(self, rcid, **kwargs): \"\"\" \"\"\" ccdid, qid = self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def", "if qid == 1: data_ = dataccd[qshape[0]:, qshape[1]:] elif qid == 2: data_", "# Flat Builder # # # # ==================== # from .builder import CalibrationBuilder", "# # # Flat Builder # # # # ==================== # from .builder", "Parsing out what to do: if groupby == \"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif", "from ..metadata import get_rawmeta from ..io import get_filepath meta = get_rawmeta(\"flat\", date, ccdid=ccdid,", ":qshape[1]] elif qid == 4: data_ = dataccd[:qshape[0], qshape[1]:] elif qid is None", "**kwargs): \"\"\" \"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header = bflat.build(set_it=False, **kwargs) return", "**kwargs): \"\"\" \"\"\" from ..io import get_filepath filename = get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid)", "rawfiles, **kwargs): \"\"\" \"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header = bflat.build(set_it=False, **kwargs)", "if qid is not None: qid = int(qid) dataccd = self.get_data(**kwargs) # this", "datapath def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs): \"\"\" \"\"\" if not assume_exist: from", "get_data() this then split the data. Parameters ---------- qid: [int or None/'*'] which", "\"\"\" \"\"\" # IRSA metadata from ..metadata import get_rawmeta from ..io import get_filepath", "def from_date(cls, date, ledid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath ccdids", "header = header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. input images\") if inclinput: basenames", "1,2,3 or 4 - None or '*'/'all': all quadrant return as list [1,2,3,4]", "\"\"\" \"\"\" from ..io import get_filepath ccdids = np.arange(1,17) filenames = [get_filepath(\"flat\", date,", "before qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid == 1: data_ = dataccd[qshape[0]:, qshape[1]:]", "**kwargs): \"\"\" \"\"\" from ..io import get_filepath ccdids = np.arange(1,17) filenames = [get_filepath(\"flat\",", "get_filepath ccdids = np.arange(1,17) filenames = [get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid) for ccdid_ in", "\"\"\" \"\"\" for f_,v_ in LED_FILTER.items(): if int(ledid) in v_: return f_ raise", "assume_exist=True): \"\"\" loads the object given the input file. 
Parameters ---------- assume_exist: [bool]", "ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames = flatfilenames", "\"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid, as_serie=True) if type(header) == dask.dataframe.core.Series: header =", "\"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif groupby == \"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else: raise", "# -------- # # BUILDER # # -------- # def build(self, corr_nl=True, corr_overscan=True,", "read_fits(cls, fitsfile, use_dask=True): \"\"\" \"\"\" if use_dask: data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\")", "= get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True) # Parsing out what to do:", "set_it=set_it, inclheader=inclheader, **kwargs) def build_header(self, keys=None, refid=0, inclinput=False): \"\"\" \"\"\" from astropy.io import", "data_ = dataccd[:qshape[0], qshape[1]:] elif qid is None or qid in [\"*\",\"all\"]: data_", "da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile) else: data = fits.getdata(fitsfile) header= fits.getheader(fitsfile) this", "= [] for i_, s_ in build_dataframe.iterrows(): # fileout = s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True)", "qid == 2: data_ = dataccd[qshape[0]:, :qshape[1]] elif qid == 3: data_ =", "get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"): \"\"\" \"\"\" # IRSA metadata from ..metadata import get_rawmeta", "= Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames = flatfilenames return", "use_dask=True, assume_exist=True): \"\"\" loads the object given the input file. Parameters ---------- assume_exist:", "CCD-base. 
See get_quadrant_data().\") # ==================== # # # # Flat Builder # #", "import warnings from astropy.io import fits from ztfimg.base import _Image_, FocalPlane LED_FILTER =", "the ztfin2p3 pipeline screen flats \"\"\" import os import numpy as np import", "datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for id_, s_", "overwrite=True, **kwargs): \"\"\" \"\"\" if not assume_exist: from ztfquery import io outs =", "\"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid, as_serie=True) if type(header) == dask.dataframe.core.Series: header = header.compute() header", "else: data = fits.getdata(fitsfile) header= fits.getheader(fitsfile) this = cls(data=data, header=header, use_dask=use_dask) this._filename =", "\"\"\" from astropy.io import fits if keys is None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\",", "# I/O # # ============== # @classmethod def from_filename(cls, filename, use_dask=True, assume_exist=True): \"\"\"", "..io import get_filepath filename = get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid) return cls.from_filename(filename, use_dask=use_dask, **kwargs)", "\"\"\" _ = super().__init__(use_dask=use_dask) self.set_data(data) if header is not None: self.set_header(header) # ==============", "@classmethod def read_fits(cls, fitsfile, use_dask=True): \"\"\" \"\"\" if use_dask: data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile),", "return outs class Flat( _Image_ ): SHAPE = 6160, 6144 QUADRANT_SHAPE = 3080,", "None or qid in [\"*\",\"all\"]: data_ = [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]],", "**kwargs): \"\"\" \"\"\" ccdid, qid = self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self, *args,", "ccdid=ccdid_, ledid=ledid) for ccdid_ in ccdids] return cls.from_filenames(filenames, use_dask=use_dask, **kwargs) # ============= #", "FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header = bflat.build(set_it=False, **kwargs) return cls(data, header=None, use_dask=True) # ==============", "============= # def get_quadrant_data(self, rcid, **kwargs): \"\"\" \"\"\" ccdid, qid = self.rcid_to_ccdid_qid(rcid) return", "[\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid, as_serie=True) if type(header)", "build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\" \"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header = bflat.build(set_it=False,", "# Method # # ============== # def get_quadrant_data(self, qid, **kwargs): \"\"\" **kwargs goes", "in v_: return f_ raise ValueError(f\"Unknown led with ID {ledid}\") def get_build_datapath(date, ccdid=None,", "Returns ------- ndarray (numpy or dask) \"\"\" if qid in [\"*\",\"all\"]: qid =", "ledid=ledid) for ccdid_ in ccdids] return cls.from_filenames(filenames, use_dask=use_dask, **kwargs) # ============= # #", "\"\"\" \"\"\" if use_dask: data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header= 
dask.delayed(fits.getheader)(fitsfile) else:", "Builder # # # # ==================== # from .builder import CalibrationBuilder class FlatBuilder(", "import dask.array as da import warnings from astropy.io import fits from ztfimg.base import", "build if needed files = s_[\"filepath\"] if not assume_exist: files = io.bulk_get_file(files) #", "{groupby} given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid),", "not basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename) if \".fits\" in basename: return cls.read_fits(filename, use_dask=use_dask) else:", "return this @classmethod def from_date(cls, date, ledid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io", "return as list [1,2,3,4] **kwargs goes to get_data() Returns ------- ndarray (numpy or", "header=header, use_dask=use_dask) this._filename = fitsfile return this @classmethod def build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\"", "= io.get_file(filename) if \".fits\" in basename: return cls.read_fits(filename, use_dask=use_dask) else: raise NotImplementedError(f\"Only fits", "\"\"\" from ..io import get_filepath ccdids = np.arange(1,17) filenames = [get_filepath(\"flat\", date, ccdid=ccdid_,", "ValueError(f\"qid must be 1->4 {qid} given\") return data_ class FlatFocalPlane( FocalPlane ): @classmethod", "# # # # Flat Builder # # # # ==================== # from", "cls(use_dask=use_dask) for file_ in flatfilenames: ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\"))", "= self.get_data(**kwargs) # this accounts for all rotation and rebin did before qshape", "not assume_exist: from ztfquery import io outs = [] for i_, s_ in", "in basename: return cls.read_fits(filename, use_dask=use_dask) else: raise NotImplementedError(f\"Only fits file loader implemented (read_fits)", "filenames = [get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid) for ccdid_ in ccdids] return cls.from_filenames(filenames, use_dask=use_dask,", "from ..io import get_filepath ccdids = np.arange(1,17) filenames = [get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid)", "ledid_to_filtername(ledid): \"\"\" \"\"\" for f_,v_ in LED_FILTER.items(): if int(ledid) in v_: return f_", "2: data_ = dataccd[qshape[0]:, :qshape[1]] elif qid == 3: data_ = dataccd[:qshape[0], :qshape[1]]", "Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames = flatfilenames return this", "\"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid, as_serie=True) if type(header) ==", "(numpy or dask) \"\"\" if qid in [\"*\",\"all\"]: qid = None if qid", "use_dask: data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile) else: data = fits.getdata(fitsfile)", "in [\"*\",\"all\"]: qid = None if qid is not None: qid = int(qid)", "data, header=header, overwrite=overwrite) outs.append(output) return outs class Flat( _Image_ ): SHAPE = 6160,", "from .builder import CalibrationBuilder class FlatBuilder( CalibrationBuilder ): # -------- # # BUILDER", 
"== dask.dataframe.core.Series: header = header.compute() header = header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num.", "def get_quadrant_data(self, rcid, **kwargs): \"\"\" \"\"\" ccdid, qid = self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs)", "# from .builder import CalibrationBuilder class FlatBuilder( CalibrationBuilder ): # -------- # #", "for i_, s_ in build_dataframe.iterrows(): # fileout = s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) # build", "if groupby == \"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif groupby == \"month\": meta[groupby] =", "not None: qid = int(qid) dataccd = self.get_data(**kwargs) # this accounts for all", "if not assume_exist: from ztfquery import io outs = [] for i_, s_", "to build the ztfin2p3 pipeline screen flats \"\"\" import os import numpy as", "date, ccdid=ccdid, ledid=ledid) return cls.from_filename(filename, use_dask=use_dask, **kwargs) @classmethod def read_fits(cls, fitsfile, use_dask=True): \"\"\"", "fits.getdata(fitsfile) header= fits.getheader(fitsfile) this = cls(data=data, header=header, use_dask=use_dask) this._filename = fitsfile return this", "# # ============== # def get_quadrant_data(self, qid, **kwargs): \"\"\" **kwargs goes to get_data()", "is None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header =", "\"\"\" if use_dask: data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile) else: data", "groupby == \"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only groupby day or month", "ztfquery import io outs = [] for i_, s_ in build_dataframe.iterrows(): # fileout", "ID {ledid}\") def get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"): \"\"\" \"\"\" # IRSA metadata from", "def __init__(self, data, header=None, use_dask=True): \"\"\" \"\"\" _ = super().__init__(use_dask=use_dask) self.set_data(data) if header", "date, ledid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath ccdids = np.arange(1,17)", "date, ccdid=ccdid_, ledid=ledid) for ccdid_ in ccdids] return cls.from_filenames(filenames, use_dask=use_dask, **kwargs) # =============", "this = cls(use_dask=use_dask) for file_ in flatfilenames: ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid", "os import numpy as np import dask import dask.array as da import warnings", "= bflat.build(set_it=False, inclheader=inclheader, **kwargs) output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite) outs.append(output) return outs", "**kwargs) def build_header(self, keys=None, refid=0, inclinput=False): \"\"\" \"\"\" from astropy.io import fits if", "\"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs) def build_header(self, keys=None, refid=0, inclinput=False):", "= dataccd[:qshape[0], :qshape[1]] elif qid == 4: data_ = dataccd[:qshape[0], qshape[1]:] elif qid", "is not None: self.set_header(header) # ============== # # I/O # # ============== #", "CalibrationBuilder class FlatBuilder( CalibrationBuilder ): # -------- # # BUILDER 
# # --------", "quadrant return as list [1,2,3,4] **kwargs goes to get_data() Returns ------- ndarray (numpy", "self.get_data(**kwargs) # this accounts for all rotation and rebin did before qshape =", "meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif groupby == \"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only", "qid in [\"*\",\"all\"]: data_ = [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:]", "# def get_quadrant_data(self, qid, **kwargs): \"\"\" **kwargs goes to get_data() this then split", "import fits from ztfimg.base import _Image_, FocalPlane LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], }", "ccdid=ccdid) this._filenames = flatfilenames return this @classmethod def from_date(cls, date, ledid, use_dask=True, **kwargs):", "*args, **kwargs): \"\"\" \"\"\" raise NotImplemented(\"get_quadrant() is not usable as flat are CCD-base.", "basenames = self.imgcollection.filenames for i, basename_ in enumerate(basenames): newheader.set(f\"INPUT{i:02d}\",basename_, \"input image\") return newheader", "object given the input file. Parameters ---------- assume_exist: [bool] Shall this run ztfquery.io.get_file()", "this then split the data. Parameters ---------- qid: [int or None/'*'] which quadrant", "- int: 1,2,3 or 4 - None or '*'/'all': all quadrant return as", "keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\", \"OBSERVAT\"] header = self.imgcollection.get_singleheader(refid, as_serie=True)", "led with ID {ledid}\") def get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"): \"\"\" \"\"\" # IRSA", "use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath ccdids = np.arange(1,17) filenames =", "6144 QUADRANT_SHAPE = 3080, 3072 def __init__(self, data, header=None, use_dask=True): \"\"\" \"\"\" _", "goes to get_data() this then split the data. 
Parameters ---------- qid: [int or", "dask import dask.array as da import warnings from astropy.io import fits from ztfimg.base", "use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath filename = get_filepath(\"flat\", date, ccdid=ccdid,", "pipeline screen flats \"\"\" import os import numpy as np import dask import", "bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False) data, header = bflat.build(set_it=False, **kwargs) return cls(data, header=None, use_dask=True)", "= io.bulk_get_file(files) # bflat = FlatBuilder.from_rawfiles(files, persist=False) data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs)", "io outs = [] for i_, s_ in build_dataframe.iterrows(): # fileout = s_.fileout", "or dask) \"\"\" if qid in [\"*\",\"all\"]: qid = None if qid is", "\"\"\" \"\"\" this = cls(use_dask=use_dask) for file_ in flatfilenames: ccd_ = Flat.from_filename(file_, use_dask=use_dask,", "bflat.build(set_it=False, **kwargs) return cls(data, header=None, use_dask=True) # ============== # # Method # #", "ccdids] return cls.from_filenames(filenames, use_dask=use_dask, **kwargs) # ============= # # Methods # # =============", "return cls.read_fits(filename, use_dask=use_dask) else: raise NotImplementedError(f\"Only fits file loader implemented (read_fits) ; {filename}", "[get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for id_, s_ in datapath.iterrows()] return datapath def", "corr_nl=True, corr_overscan=True, clipping=True, set_it=False, inclheader=True, **kwargs): \"\"\" \"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it,", "implemented: {groupby} given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]),", "dataccd[:qshape[0], :qshape[1]] elif qid == 4: data_ = dataccd[:qshape[0], qshape[1]:] elif qid is", "return cls.from_filenames(filenames, use_dask=use_dask, **kwargs) # ============= # # Methods # # ============= #", "usable as flat are CCD-base. See get_quadrant_data().\") # ==================== # # # #", "run ztfquery.io.get_file() ? \"\"\" from ztfquery import io basename = os.path.basename(filename) if not", "# @classmethod def from_filename(cls, filename, use_dask=True, assume_exist=True): \"\"\" loads the object given the", "import io basename = os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename) if \".fits\"", "or None/'*'] which quadrant you want ? 
- int: 1,2,3 or 4 -", "persist=False) data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs) output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite)", "files = s_[\"filepath\"] if not assume_exist: files = io.bulk_get_file(files) # bflat = FlatBuilder.from_rawfiles(files,", "ledid=ledid, getwhat=\"filepath\", in_meta=True) # Parsing out what to do: if groupby == \"day\":", "raise ValueError(f\"qid must be 1->4 {qid} given\") return data_ class FlatFocalPlane( FocalPlane ):", "qid = self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self, *args, **kwargs): \"\"\" \"\"\" raise", "io.bulk_get_file(files) # bflat = FlatBuilder.from_rawfiles(files, persist=False) data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs) output", "import get_filepath meta = get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True) # Parsing out", "**kwargs) output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite) outs.append(output) return outs class Flat( _Image_", "fits from ztfimg.base import _Image_, FocalPlane LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10], \"zi\":[11,12,13], } def", "flatfilenames return this @classmethod def from_date(cls, date, ledid, use_dask=True, **kwargs): \"\"\" \"\"\" from", "**kwargs): \"\"\" \"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs) def build_header(self, keys=None,", "input file. Parameters ---------- assume_exist: [bool] Shall this run ztfquery.io.get_file() ? \"\"\" from", "= dataccd[:qshape[0], qshape[1]:] elif qid is None or qid in [\"*\",\"all\"]: data_ =", "[get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid) for ccdid_ in ccdids] return cls.from_filenames(filenames, use_dask=use_dask, **kwargs) #", "elif qid == 3: data_ = dataccd[:qshape[0], :qshape[1]] elif qid == 4: data_", "from astropy.io import fits from ztfimg.base import _Image_, FocalPlane LED_FILTER = {\"zg\":[2,3,4,5], \"zr\":[7,8,9,10],", "============== # # I/O # # ============== # @classmethod def from_filename(cls, filename, use_dask=True,", "assume_exist: files = io.bulk_get_file(files) # bflat = FlatBuilder.from_rawfiles(files, persist=False) data, header = bflat.build(set_it=False,", "# ============== # # I/O # # ============== # @classmethod def from_filename(cls, filename,", "accounts for all rotation and rebin did before qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if", "**kwargs) return cls(data, header=None, use_dask=True) # ============== # # Method # # ==============", "== 4: data_ = dataccd[:qshape[0], qshape[1]:] elif qid is None or qid in", "fits if keys is None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\", \"PROGRMID\",\"FILTERID\", \"FILTER\",\"FILTPOS\",\"RA\",\"DEC\",", "da import warnings from astropy.io import fits from ztfimg.base import _Image_, FocalPlane LED_FILTER", "\"zi\":[11,12,13], } def ledid_to_filtername(ledid): \"\"\" \"\"\" for f_,v_ in LED_FILTER.items(): if int(ledid) in", "return f_ raise ValueError(f\"Unknown led with ID {ledid}\") def get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"):", "{qid} given\") return data_ class FlatFocalPlane( FocalPlane ): @classmethod def from_filenames(cls, flatfilenames, 
use_dask=True,", "= meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only groupby day or month implemented: {groupby} given\") datapath", "astropy.io import fits if keys is None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\", \"ILUMWAVE\",", "from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath filename", "flatfilenames, use_dask=True, **kwargs): \"\"\" \"\"\" this = cls(use_dask=use_dask) for file_ in flatfilenames: ccd_", "None or '*'/'all': all quadrant return as list [1,2,3,4] **kwargs goes to get_data()", "assume_exist: [bool] Shall this run ztfquery.io.get_file() ? \"\"\" from ztfquery import io basename", "qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:] ] else: raise ValueError(f\"qid must be", "not assume_exist: files = io.bulk_get_file(files) # bflat = FlatBuilder.from_rawfiles(files, persist=False) data, header =", "to get_data() this then split the data. Parameters ---------- qid: [int or None/'*']", "flatfilenames: ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames =", "\"\"\" from ztfquery import io basename = os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"): filename =", "from ztfquery import io basename = os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename)", "@classmethod def from_filename(cls, filename, use_dask=True, assume_exist=True): \"\"\" loads the object given the input", "clipping=True, set_it=False, inclheader=True, **kwargs): \"\"\" \"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs)", "self.set_data(data) if header is not None: self.set_header(header) # ============== # # I/O #", "if qid in [\"*\",\"all\"]: qid = None if qid is not None: qid", "# # Flat Builder # # # # ==================== # from .builder import", "to do: if groupby == \"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8] elif groupby == \"month\":", "return this @classmethod def build_from_rawfiles(cls, rawfiles, **kwargs): \"\"\" \"\"\" bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False)", "qid = None if qid is not None: qid = int(qid) dataccd =", "dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:] ] else: raise ValueError(f\"qid must be 1->4 {qid} given\")", "\"\"\" \"\"\" ccdid, qid = self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self, *args, **kwargs):", "or month implemented: {groupby} given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] =", "given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid),", "rotation and rebin did before qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid == 1:", "= get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid) return cls.from_filename(filename, 
use_dask=use_dask, **kwargs) @classmethod def read_fits(cls, fitsfile,", "as da import warnings from astropy.io import fits from ztfimg.base import _Image_, FocalPlane", "bflat.build(set_it=False, inclheader=inclheader, **kwargs) output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite) outs.append(output) return outs class", "QUADRANT_SHAPE = 3080, 3072 def __init__(self, data, header=None, use_dask=True): \"\"\" \"\"\" _ =", "[dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:] ] else: raise ValueError(f\"qid must", "ledid=ledid) return cls.from_filename(filename, use_dask=use_dask, **kwargs) @classmethod def read_fits(cls, fitsfile, use_dask=True): \"\"\" \"\"\" if", "..io import get_filepath ccdids = np.arange(1,17) filenames = [get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid) for", "= np.arange(1,17) filenames = [get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid) for ccdid_ in ccdids] return", "in ccdids] return cls.from_filenames(filenames, use_dask=use_dask, **kwargs) # ============= # # Methods # #", "\"\"\" raise NotImplemented(\"get_quadrant() is not usable as flat are CCD-base. See get_quadrant_data().\") #", "inclheader=inclheader, **kwargs) def build_header(self, keys=None, refid=0, inclinput=False): \"\"\" \"\"\" from astropy.io import fits", "# # ==================== # from .builder import CalibrationBuilder class FlatBuilder( CalibrationBuilder ): #", "header=None, use_dask=True): \"\"\" \"\"\" _ = super().__init__(use_dask=use_dask) self.set_data(data) if header is not None:", "{filename} given\") @classmethod def from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs): \"\"\" \"\"\" from", "io basename = os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename) if \".fits\" in", "you want ? 
- int: 1,2,3 or 4 - None or '*'/'all': all", "get_data() Returns ------- ndarray (numpy or dask) \"\"\" if qid in [\"*\",\"all\"]: qid", "as_serie=True) if type(header) == dask.dataframe.core.Series: header = header.compute() header = header.loc[keys] newheader =", "qshape[1]:] elif qid is None or qid in [\"*\",\"all\"]: data_ = [dataccd[qshape[0]:, qshape[1]:],", "s_[\"filepath\"] if not assume_exist: files = io.bulk_get_file(files) # bflat = FlatBuilder.from_rawfiles(files, persist=False) data,", "list [1,2,3,4] **kwargs goes to get_data() Returns ------- ndarray (numpy or dask) \"\"\"", "= meta.filefracday.astype(\"str\").str[:8] elif groupby == \"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only groupby", "header = bflat.build(set_it=False, inclheader=inclheader, **kwargs) output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite) outs.append(output) return", "cls(data=data, header=header, use_dask=use_dask) this._filename = fitsfile return this @classmethod def build_from_rawfiles(cls, rawfiles, **kwargs):", "# # ============== # @classmethod def from_filename(cls, filename, use_dask=True, assume_exist=True): \"\"\" loads the", "FlatBuilder( CalibrationBuilder ): # -------- # # BUILDER # # -------- # def", "= self.imgcollection.get_singleheader(refid, as_serie=True) if type(header) == dask.dataframe.core.Series: header = header.compute() header = header.loc[keys]", "get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid) return cls.from_filename(filename, use_dask=use_dask, **kwargs) @classmethod def read_fits(cls, fitsfile, use_dask=True):", "__init__(self, data, header=None, use_dask=True): \"\"\" \"\"\" _ = super().__init__(use_dask=use_dask) self.set_data(data) if header is", "= [get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid) for ccdid_ in ccdids] return cls.from_filenames(filenames, use_dask=use_dask, **kwargs)", "header=None, use_dask=True) # ============== # # Method # # ============== # def get_quadrant_data(self,", "return data_ class FlatFocalPlane( FocalPlane ): @classmethod def from_filenames(cls, flatfilenames, use_dask=True, **kwargs): \"\"\"", "if inclinput: basenames = self.imgcollection.filenames for i, basename_ in enumerate(basenames): newheader.set(f\"INPUT{i:02d}\",basename_, \"input image\")", "id_, s_ in datapath.iterrows()] return datapath def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs): \"\"\"", "i_, s_ in build_dataframe.iterrows(): # fileout = s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if", "qid == 1: data_ = dataccd[qshape[0]:, qshape[1]:] elif qid == 2: data_ =", "file_ in flatfilenames: ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs) ccdid = int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid)", "else: raise NotImplementedError(f\"Only fits file loader implemented (read_fits) ; {filename} given\") @classmethod def", "type(header) == dask.dataframe.core.Series: header = header.compute() header = header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages,", "\"\"\" if qid in [\"*\",\"all\"]: qid = None if qid is not None:", "def from_filename(cls, filename, use_dask=True, assume_exist=True): \"\"\" loads the object given the input file.", "from_filenames(cls, flatfilenames, use_dask=True, **kwargs): \"\"\" \"\"\" this = cls(use_dask=use_dask) for file_ in flatfilenames:", "set_it=False, 
inclheader=True, **kwargs): \"\"\" \"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs) def", "# Parsing out what to do: if groupby == \"day\": meta[groupby] = meta.filefracday.astype(\"str\").str[:8]", "raise ValueError(f\"Unknown led with ID {ledid}\") def get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"): \"\"\" \"\"\"", "self.imgcollection.get_singleheader(refid, as_serie=True) if type(header) == dask.dataframe.core.Series: header = header.compute() header = header.loc[keys] newheader", "elif groupby == \"month\": meta[groupby] = meta.filefracday.astype(\"str\").str[:6] else: raise ValueError(f\"Only groupby day or", "goes to get_data() Returns ------- ndarray (numpy or dask) \"\"\" if qid in", "np import dask import dask.array as da import warnings from astropy.io import fits", "\"\"\" **kwargs goes to get_data() this then split the data. Parameters ---------- qid:", "**kwargs) # ============= # # Methods # # ============= # def get_quadrant_data(self, rcid,", "int(file_.split(\"_\")[-3].replace(\"c\",\"\")) this.set_ccd(ccd_, ccdid=ccdid) this._filenames = flatfilenames return this @classmethod def from_date(cls, date, ledid,", "for all rotation and rebin did before qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid", "\"\"\" ccdid, qid = self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self, *args, **kwargs): \"\"\"", "persist=False) data, header = bflat.build(set_it=False, **kwargs) return cls(data, header=None, use_dask=True) # ============== #", "data = fits.getdata(fitsfile) header= fits.getheader(fitsfile) this = cls(data=data, header=header, use_dask=use_dask) this._filename = fitsfile", "# # I/O # # ============== # @classmethod def from_filename(cls, filename, use_dask=True, assume_exist=True):", "get_rawmeta from ..io import get_filepath meta = get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True)", "np.arange(1,17) filenames = [get_filepath(\"flat\", date, ccdid=ccdid_, ledid=ledid) for ccdid_ in ccdids] return cls.from_filenames(filenames,", "dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:] ] else: raise ValueError(f\"qid must be 1->4", "**kwargs goes to get_data() this then split the data. Parameters ---------- qid: [int", "Methods # # ============= # def get_quadrant_data(self, rcid, **kwargs): \"\"\" \"\"\" ccdid, qid", "= 6160, 6144 QUADRANT_SHAPE = 3080, 3072 def __init__(self, data, header=None, use_dask=True): \"\"\"", "---------- qid: [int or None/'*'] which quadrant you want ? 
- int: 1,2,3", "# # -------- # def build(self, corr_nl=True, corr_overscan=True, clipping=True, set_it=False, inclheader=True, **kwargs): \"\"\"", "super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs) def build_header(self, keys=None, refid=0, inclinput=False): \"\"\" \"\"\"", ":qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:] ] else: raise ValueError(f\"qid must be 1->4 {qid}", "-------- # def build(self, corr_nl=True, corr_overscan=True, clipping=True, set_it=False, inclheader=True, **kwargs): \"\"\" \"\"\" return", "ValueError(f\"Unknown led with ID {ledid}\") def get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"): \"\"\" \"\"\" #", "============== # def get_quadrant_data(self, qid, **kwargs): \"\"\" **kwargs goes to get_data() this then", "# # # ==================== # from .builder import CalibrationBuilder class FlatBuilder( CalibrationBuilder ):", "\"\"\" \"\"\" raise NotImplemented(\"get_quadrant() is not usable as flat are CCD-base. See get_quadrant_data().\")", "= s_.fileout os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed files = s_[\"filepath\"] if not", "============= # # Methods # # ============= # def get_quadrant_data(self, rcid, **kwargs): \"\"\"", "import os import numpy as np import dask import dask.array as da import", "[\"*\",\"all\"]: data_ = [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0], :qshape[1]], dataccd[:qshape[0], qshape[1]:] ] else:", "data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile) else: data = fits.getdata(fitsfile) header=", "\"\"\" import os import numpy as np import dask import dask.array as da", "ledid=int(s_.ledid), filtername=s_.filtername) for id_, s_ in datapath.iterrows()] return datapath def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False,", "qshape[1]:] ] else: raise ValueError(f\"qid must be 1->4 {qid} given\") return data_ class", "use_dask=use_dask, **kwargs) @classmethod def read_fits(cls, fitsfile, use_dask=True): \"\"\" \"\"\" if use_dask: data =", "if use_dask: data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile), shape=cls.SHAPE, dtype=\"float\") header= dask.delayed(fits.getheader)(fitsfile) else: data =", "import io outs = [] for i_, s_ in build_dataframe.iterrows(): # fileout =", "outs = [] for i_, s_ in build_dataframe.iterrows(): # fileout = s_.fileout os.makedirs(os.path.dirname(fileout),", "\"num. 
input images\") if inclinput: basenames = self.imgcollection.filenames for i, basename_ in enumerate(basenames):", "def from_filenames(cls, flatfilenames, use_dask=True, **kwargs): \"\"\" \"\"\" this = cls(use_dask=use_dask) for file_ in", "data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs) output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite) outs.append(output)", "def from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath", "assume_exist: from ztfquery import io outs = [] for i_, s_ in build_dataframe.iterrows():", "keys=None, refid=0, inclinput=False): \"\"\" \"\"\" from astropy.io import fits if keys is None:", "= os.path.basename(filename) if not basename.startswith(\"ztfin2p3\"): filename = io.get_file(filename) if \".fits\" in basename: return", "def get_build_datapath(date, ccdid=None, ledid=None, groupby=\"day\"): \"\"\" \"\"\" # IRSA metadata from ..metadata import", "import get_rawmeta from ..io import get_filepath meta = get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\",", "header=header, overwrite=overwrite) outs.append(output) return outs class Flat( _Image_ ): SHAPE = 6160, 6144", "fits file loader implemented (read_fits) ; {filename} given\") @classmethod def from_date(cls, date, ledid,", "the input file. Parameters ---------- assume_exist: [bool] Shall this run ztfquery.io.get_file() ? \"\"\"", "(read_fits) ; {filename} given\") @classmethod def from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs): \"\"\"", "cls(data, header=None, use_dask=True) # ============== # # Method # # ============== # def", "elif qid == 4: data_ = dataccd[:qshape[0], qshape[1]:] elif qid is None or", "inclheader=True, **kwargs): \"\"\" \"\"\" return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs) def build_header(self,", "= np.asarray(np.asarray(dataccd.shape)/2, dtype=\"int\") if qid == 1: data_ = dataccd[qshape[0]:, qshape[1]:] elif qid", "return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self, *args, **kwargs): \"\"\" \"\"\" raise NotImplemented(\"get_quadrant() is not", "ccdid=ccdid, ledid=ledid) return cls.from_filename(filename, use_dask=use_dask, **kwargs) @classmethod def read_fits(cls, fitsfile, use_dask=True): \"\"\" \"\"\"", "dask.dataframe.core.Series: header = header.compute() header = header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. input", "str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for id_, s_ in datapath.iterrows()] return datapath def build_from_datapath(build_dataframe,", "given the input file. 
Parameters ---------- assume_exist: [bool] Shall this run ztfquery.io.get_file() ?", "\"zr\":[7,8,9,10], \"zi\":[11,12,13], } def ledid_to_filtername(ledid): \"\"\" \"\"\" for f_,v_ in LED_FILTER.items(): if int(ledid)", "io.get_file(filename) if \".fits\" in basename: return cls.read_fits(filename, use_dask=use_dask) else: raise NotImplementedError(f\"Only fits file", "qid in [\"*\",\"all\"]: qid = None if qid is not None: qid =", "1->4 {qid} given\") return data_ class FlatFocalPlane( FocalPlane ): @classmethod def from_filenames(cls, flatfilenames,", "from_date(cls, date, ledid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath ccdids =", "from astropy.io import fits if keys is None: keys = [\"ORIGIN\",\"OBSERVER\",\"INSTRUME\",\"IMGTYPE\",\"EXPTIME\", \"CCDSUM\",\"CCD_ID\",\"CCDNAME\",\"PIXSCALE\",\"PIXSCALX\",\"PIXSCALY\", \"FRAMENUM\",\"ILUM_LED\",", "os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed files = s_[\"filepath\"] if not assume_exist: files", "input images\") if inclinput: basenames = self.imgcollection.filenames for i, basename_ in enumerate(basenames): newheader.set(f\"INPUT{i:02d}\",basename_,", "month implemented: {groupby} given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"] = [get_filepath(\"flat\",", "inclinput=False): \"\"\" \"\"\" from astropy.io import fits if keys is None: keys =", "..io import get_filepath meta = get_rawmeta(\"flat\", date, ccdid=ccdid, ledid=ledid, getwhat=\"filepath\", in_meta=True) # Parsing", "@classmethod def from_filenames(cls, flatfilenames, use_dask=True, **kwargs): \"\"\" \"\"\" this = cls(use_dask=use_dask) for file_", "return super().build(corr_nl=corr_nl, corr_overscan=corr_overscan, clipping=clipping, set_it=set_it, inclheader=inclheader, **kwargs) def build_header(self, keys=None, refid=0, inclinput=False): \"\"\"", "use_dask=use_dask) else: raise NotImplementedError(f\"Only fits file loader implemented (read_fits) ; {filename} given\") @classmethod", "def build_header(self, keys=None, refid=0, inclinput=False): \"\"\" \"\"\" from astropy.io import fits if keys", "ledid, use_dask=True, **kwargs): \"\"\" \"\"\" from ..io import get_filepath ccdids = np.arange(1,17) filenames", "**kwargs goes to get_data() Returns ------- ndarray (numpy or dask) \"\"\" if qid", "self.rcid_to_ccdid_qid(rcid) return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs) def get_quadrant(self, *args, **kwargs): \"\"\" \"\"\" raise NotImplemented(\"get_quadrant() is", "= header.compute() header = header.loc[keys] newheader = fits.Header(header.loc[keys].to_dict()) newheader.set(f\"NINPUTS\",self.imgcollection.nimages, \"num. 
input images\") if", "Flat Builder # # # # ==================== # from .builder import CalibrationBuilder class", "in LED_FILTER.items(): if int(ledid) in v_: return f_ raise ValueError(f\"Unknown led with ID", "is None or qid in [\"*\",\"all\"]: data_ = [dataccd[qshape[0]:, qshape[1]:], dataccd[qshape[0]:, :qshape[1]], dataccd[:qshape[0],", "header = bflat.build(set_it=False, **kwargs) return cls(data, header=None, use_dask=True) # ============== # # Method", "class FlatFocalPlane( FocalPlane ): @classmethod def from_filenames(cls, flatfilenames, use_dask=True, **kwargs): \"\"\" \"\"\" this", "FlatFocalPlane( FocalPlane ): @classmethod def from_filenames(cls, flatfilenames, use_dask=True, **kwargs): \"\"\" \"\"\" this =", "from ..io import get_filepath filename = get_filepath(\"flat\", date, ccdid=ccdid, ledid=ledid) return cls.from_filename(filename, use_dask=use_dask,", "day or month implemented: {groupby} given\") datapath = meta.groupby([groupby,\"ccdid\",\"ledid\"])[\"filepath\"].apply(list).reset_index() datapath[\"filtername\"] = datapath[\"ledid\"].apply(ledid_to_filtername) datapath[\"fileout\"]", "this = cls(data=data, header=header, use_dask=use_dask) this._filename = fitsfile return this @classmethod def build_from_rawfiles(cls,", "self.set_header(header) # ============== # # I/O # # ============== # @classmethod def from_filename(cls,", "; {filename} given\") @classmethod def from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs): \"\"\" \"\"\"", "\"\"\" if not assume_exist: from ztfquery import io outs = [] for i_,", "3072 def __init__(self, data, header=None, use_dask=True): \"\"\" \"\"\" _ = super().__init__(use_dask=use_dask) self.set_data(data) if", "= [get_filepath(\"flat\", str(s_[groupby]), ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername) for id_, s_ in datapath.iterrows()] return datapath" ]
[ "elif left == 'iyr': print( 'iyr', parts_found) if len(right) == 4 and (int(right)", "len(right) == 4 and (int(right) >= 1920 and int(right) <= 2002): parts_found +=1", "chunck split and check if valid for text in chuncks: parts = text.split()", "valid and right[1] in valid_chars valid = valid and right[2] in valid_chars valid", "print( 'ecl', parts_found) valid_eye_color = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'] if", "unit == 'in': if int(num) >= 59 and int(num) <= 76: parts_found +=1", "for part in parts: parts = part.split(':') left = parts[0] right = parts[1]", "left == 'iyr': print( 'iyr', parts_found) if len(right) == 4 and (int(right) >=", "valid for text in chuncks: parts = text.split() print(text) print(parts) parts_found = 0", "right[:-2] unit = right[-2:] if unit == 'cm': if int(num) >= 150 and", "'iyr': print( 'iyr', parts_found) if len(right) == 4 and (int(right) >= 2010 and", "left == 'byr': print( 'byr', parts_found) if len(right) == 4 and (int(right) >=", "valid and right[2] in valid_chars valid = valid and right[3] in valid_chars valid", "parts_found) if len(right) == 9: parts_found +=1 else: print('invalid pid') invalid = True", "and int(right) <= 2030): parts_found +=1 else: print('invalid eyr') invalid = True elif", "else: print('invalid unit') invalid = True print(parts_found, invalid) if parts_found == 7 and", "print( 'pid', parts_found) if len(right) == 9: parts_found +=1 else: print('invalid pid') invalid", "'pid': print( 'pid', parts_found) if len(right) == 9: parts_found +=1 else: print('invalid pid')", "valid = valid and right[6] in valid_chars if valid: parts_found += 1 else:", "parts_found) num = right[:-2] unit = right[-2:] if unit == 'cm': if int(num)", "True elif left == 'cid': has_cid = True elif left == 'hgt': print('hgt',", "parts[0] right = parts[1] print(left, right) if left == 'ecl': print( 'ecl', parts_found)", "<= 2030): parts_found +=1 else: print('invalid eyr') invalid = True elif left ==", "parts_found) if len(right) == 4 and (int(right) >= 2010 and int(right) <= 2020):", "unit = right[-2:] if unit == 'cm': if int(num) >= 150 and int(num)", "valid_chars if valid: parts_found += 1 else: print('invalid hcl') invalid = True elif", "True print(parts_found, invalid) if parts_found == 7 and not invalid: valid_count+=1 print(\"valid passports:\",", "= True valid_chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8',", "split and check if valid for text in chuncks: parts = text.split() print(text)", "False invalid = False for part in parts: parts = part.split(':') left =", "76: parts_found +=1 else: print('invalid cm') invalid = True else: print('invalid unit') invalid", "and right[4] in valid_chars valid = valid and right[5] in valid_chars valid =", "= valid and right[1] in valid_chars valid = valid and right[2] in valid_chars", "right in valid_eye_color: parts_found +=1 else: print('invalid eye') invalid = True elif left", "len(right) == 4 and (int(right) >= 2010 and int(right) <= 2020): parts_found +=1", "'hgt': print('hgt', parts_found) num = right[:-2] unit = right[-2:] if unit == 'cm':", "and int(num) <= 76: parts_found +=1 else: print('invalid cm') invalid = True else:", "text.split() print(text) print(parts) parts_found = 0 has_cid = False invalid = False for", "f: all_text = f.read() chuncks = all_text.split('\\n\\n') # print(chuncks) valid_count = 0 #", "parts_found +=1 else: print('invalid eyr') invalid = True elif left == 'hcl': print(", "+= 1 else: print('invalid hcl') invalid = True elif left == 'byr': print(", "valid_chars valid 
= valid and right[5] in valid_chars valid = valid and right[6]", "['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'] if right in valid_eye_color: parts_found +=1", "'grn', 'hzl', 'oth'] if right in valid_eye_color: parts_found +=1 else: print('invalid eye') invalid", "elif left == 'pid': print( 'pid', parts_found) if len(right) == 9: parts_found +=1", "1 else: print('invalid hcl') invalid = True elif left == 'byr': print( 'byr',", "'b', 'c', 'd', 'e', 'f'] valid = valid and right[0] == '#' valid", "and int(right) <= 2002): parts_found +=1 else: print('invalid byr') invalid = True elif", "if int(num) >= 59 and int(num) <= 76: parts_found +=1 else: print('invalid cm')", "== 4 and (int(right) >= 2010 and int(right) <= 2020): parts_found +=1 else:", "2002): parts_found +=1 else: print('invalid byr') invalid = True elif left == 'iyr':", "left == 'eyr': print( 'eyr', parts_found) if len(right) == 4 and (int(right) >=", "valid_chars valid = valid and right[6] in valid_chars if valid: parts_found += 1", "+=1 else: print('invalid eye') invalid = True elif left == 'pid': print( 'pid',", "if parts_found == 7 and not invalid: valid_count+=1 print(\"valid passports:\", valid_count) # end", "right[0] == '#' valid = valid and right[1] in valid_chars valid = valid", "'eyr', parts_found) if len(right) == 4 and (int(right) >= 2020 and int(right) <=", "parts_found +=1 else: print('invalid iyr') invalid = True elif left == 'cid': has_cid", "if len(right) == 4 and (int(right) >= 2020 and int(right) <= 2030): parts_found", "== 'hcl': print( 'hcl', parts_found) valid = True valid_chars = ['0', '1', '2',", "False for part in parts: parts = part.split(':') left = parts[0] right =", "'e', 'f'] valid = valid and right[0] == '#' valid = valid and", "parts_found +=1 else: print('invalid pid') invalid = True elif left == 'eyr': print(", "invalid = True elif left == 'eyr': print( 'eyr', parts_found) if len(right) ==", ">= 2020 and int(right) <= 2030): parts_found +=1 else: print('invalid eyr') invalid =", "= valid and right[2] in valid_chars valid = valid and right[3] in valid_chars", "parts_found += 1 else: print('invalid hcl') invalid = True elif left == 'byr':", "in chuncks: parts = text.split() print(text) print(parts) parts_found = 0 has_cid = False", "left == 'hcl': print( 'hcl', parts_found) valid = True valid_chars = ['0', '1',", "and right[2] in valid_chars valid = valid and right[3] in valid_chars valid =", "= True elif left == 'cid': has_cid = True elif left == 'hgt':", "= True print(parts_found, invalid) if parts_found == 7 and not invalid: valid_count+=1 print(\"valid", "2030): parts_found +=1 else: print('invalid eyr') invalid = True elif left == 'hcl':", "invalid = True elif unit == 'in': if int(num) >= 59 and int(num)", "+=1 else: print('invalid byr') invalid = True elif left == 'iyr': print( 'iyr',", "'cm': if int(num) >= 150 and int(num) <=193: parts_found +=1 else: print('invalid cm')", "valid_eye_color = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'] if right in valid_eye_color:", "parts_found +=1 else: print('invalid cm') invalid = True else: print('invalid unit') invalid =", "valid and right[0] == '#' valid = valid and right[1] in valid_chars valid", "open('python\\\\2020\\day4\\data.txt') as f: all_text = f.read() chuncks = all_text.split('\\n\\n') # print(chuncks) valid_count =", "right[4] in valid_chars valid = valid and right[5] in valid_chars valid = valid", "else: print('invalid cm') invalid = True else: print('invalid unit') invalid = True print(parts_found,", "right = parts[1] print(left, 
right) if left == 'ecl': print( 'ecl', parts_found) valid_eye_color", "eye') invalid = True elif left == 'pid': print( 'pid', parts_found) if len(right)", "print( 'byr', parts_found) if len(right) == 4 and (int(right) >= 1920 and int(right)", "+=1 else: print('invalid cm') invalid = True elif unit == 'in': if int(num)", "parts_found == 7 and not invalid: valid_count+=1 print(\"valid passports:\", valid_count) # end part", "= True elif left == 'hcl': print( 'hcl', parts_found) valid = True valid_chars", "= right[:-2] unit = right[-2:] if unit == 'cm': if int(num) >= 150", "cm') invalid = True else: print('invalid unit') invalid = True print(parts_found, invalid) if", "== 9: parts_found +=1 else: print('invalid pid') invalid = True elif left ==", "'hcl', parts_found) valid = True valid_chars = ['0', '1', '2', '3', '4', '5',", "2020): parts_found +=1 else: print('invalid iyr') invalid = True elif left == 'cid':", "in parts: parts = part.split(':') left = parts[0] right = parts[1] print(left, right)", "'4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'] valid", "hcl') invalid = True elif left == 'byr': print( 'byr', parts_found) if len(right)", "int(right) <= 2002): parts_found +=1 else: print('invalid byr') invalid = True elif left", "== 'byr': print( 'byr', parts_found) if len(right) == 4 and (int(right) >= 1920", "'7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'] valid = valid and", "right[1] in valid_chars valid = valid and right[2] in valid_chars valid = valid", "has_cid = False invalid = False for part in parts: parts = part.split(':')", "unit == 'cm': if int(num) >= 150 and int(num) <=193: parts_found +=1 else:", "valid = valid and right[2] in valid_chars valid = valid and right[3] in", "valid_chars valid = valid and right[4] in valid_chars valid = valid and right[5]", "eyr') invalid = True elif left == 'hcl': print( 'hcl', parts_found) valid =", "iyr') invalid = True elif left == 'cid': has_cid = True elif left", "parts_found +=1 else: print('invalid eye') invalid = True elif left == 'pid': print(", "and right[3] in valid_chars valid = valid and right[4] in valid_chars valid =", "'5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'] valid =", "= 0 has_cid = False invalid = False for part in parts: parts", "'ecl', parts_found) valid_eye_color = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'] if right", "len(right) == 4 and (int(right) >= 2020 and int(right) <= 2030): parts_found +=1", "parts: parts = part.split(':') left = parts[0] right = parts[1] print(left, right) if", "parts_found) valid_eye_color = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'] if right in", "= False invalid = False for part in parts: parts = part.split(':') left", "print('invalid hcl') invalid = True elif left == 'byr': print( 'byr', parts_found) if", "if valid for text in chuncks: parts = text.split() print(text) print(parts) parts_found =", "valid_eye_color: parts_found +=1 else: print('invalid eye') invalid = True elif left == 'pid':", "== 'cid': has_cid = True elif left == 'hgt': print('hgt', parts_found) num =", "'6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'] valid = valid", "'ecl': print( 'ecl', parts_found) valid_eye_color = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']", "= valid and right[4] in valid_chars valid = valid and right[5] in valid_chars", "True elif unit == 'in': if int(num) >= 59 and int(num) <= 76:", "= True else: print('invalid unit') invalid = True print(parts_found, invalid) if parts_found ==", "all_text.split('\\n\\n') # print(chuncks) valid_count = 0 # for each chunck 
split and check", ">= 2010 and int(right) <= 2020): parts_found +=1 else: print('invalid iyr') invalid =", "in valid_chars valid = valid and right[5] in valid_chars valid = valid and", "(int(right) >= 2020 and int(right) <= 2030): parts_found +=1 else: print('invalid eyr') invalid", "in valid_chars valid = valid and right[3] in valid_chars valid = valid and", "len(right) == 9: parts_found +=1 else: print('invalid pid') invalid = True elif left", "(int(right) >= 1920 and int(right) <= 2002): parts_found +=1 else: print('invalid byr') invalid", "= 0 # for each chunck split and check if valid for text", "if valid: parts_found += 1 else: print('invalid hcl') invalid = True elif left", "'eyr': print( 'eyr', parts_found) if len(right) == 4 and (int(right) >= 2020 and", "0 has_cid = False invalid = False for part in parts: parts =", "in valid_eye_color: parts_found +=1 else: print('invalid eye') invalid = True elif left ==", "print('invalid pid') invalid = True elif left == 'eyr': print( 'eyr', parts_found) if", "True else: print('invalid unit') invalid = True print(parts_found, invalid) if parts_found == 7", "== 4 and (int(right) >= 1920 and int(right) <= 2002): parts_found +=1 else:", "invalid = True elif left == 'iyr': print( 'iyr', parts_found) if len(right) ==", "chuncks: parts = text.split() print(text) print(parts) parts_found = 0 has_cid = False invalid", "part.split(':') left = parts[0] right = parts[1] print(left, right) if left == 'ecl':", "valid = valid and right[0] == '#' valid = valid and right[1] in", "'iyr', parts_found) if len(right) == 4 and (int(right) >= 2010 and int(right) <=", "f.read() chuncks = all_text.split('\\n\\n') # print(chuncks) valid_count = 0 # for each chunck", "elif left == 'eyr': print( 'eyr', parts_found) if len(right) == 4 and (int(right)", "= True elif unit == 'in': if int(num) >= 59 and int(num) <=", "num = right[:-2] unit = right[-2:] if unit == 'cm': if int(num) >=", "unit') invalid = True print(parts_found, invalid) if parts_found == 7 and not invalid:", "= ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'] if right in valid_eye_color: parts_found", "True elif left == 'pid': print( 'pid', parts_found) if len(right) == 9: parts_found", "== 4 and (int(right) >= 2020 and int(right) <= 2030): parts_found +=1 else:", "invalid = True elif left == 'hcl': print( 'hcl', parts_found) valid = True", "int(num) >= 59 and int(num) <= 76: parts_found +=1 else: print('invalid cm') invalid", "and (int(right) >= 2020 and int(right) <= 2030): parts_found +=1 else: print('invalid eyr')", "if int(num) >= 150 and int(num) <=193: parts_found +=1 else: print('invalid cm') invalid", "'8', '9', 'a', 'b', 'c', 'd', 'e', 'f'] valid = valid and right[0]", "parts_found) valid = True valid_chars = ['0', '1', '2', '3', '4', '5', '6',", ">= 59 and int(num) <= 76: parts_found +=1 else: print('invalid cm') invalid =", "valid and right[6] in valid_chars if valid: parts_found += 1 else: print('invalid hcl')", "and int(right) <= 2020): parts_found +=1 else: print('invalid iyr') invalid = True elif", ">= 1920 and int(right) <= 2002): parts_found +=1 else: print('invalid byr') invalid =", "int(right) <= 2020): parts_found +=1 else: print('invalid iyr') invalid = True elif left", "invalid = True elif left == 'cid': has_cid = True elif left ==", "print( 'eyr', parts_found) if len(right) == 4 and (int(right) >= 2020 and int(right)", "for text in chuncks: parts = text.split() print(text) print(parts) parts_found = 0 has_cid", "print(left, right) if left == 'ecl': print( 'ecl', parts_found) 
valid_eye_color = ['amb', 'blu',", "and int(num) <=193: parts_found +=1 else: print('invalid cm') invalid = True elif unit", "'in': if int(num) >= 59 and int(num) <= 76: parts_found +=1 else: print('invalid", "print('invalid byr') invalid = True elif left == 'iyr': print( 'iyr', parts_found) if", "'2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e',", "print( 'iyr', parts_found) if len(right) == 4 and (int(right) >= 2010 and int(right)", "= False for part in parts: parts = part.split(':') left = parts[0] right", "parts_found +=1 else: print('invalid cm') invalid = True elif unit == 'in': if", "True elif left == 'byr': print( 'byr', parts_found) if len(right) == 4 and", "check if valid for text in chuncks: parts = text.split() print(text) print(parts) parts_found", "= True elif left == 'iyr': print( 'iyr', parts_found) if len(right) == 4", "# print(chuncks) valid_count = 0 # for each chunck split and check if", "True elif left == 'eyr': print( 'eyr', parts_found) if len(right) == 4 and", "print(chuncks) valid_count = 0 # for each chunck split and check if valid", "9: parts_found +=1 else: print('invalid pid') invalid = True elif left == 'eyr':", "if right in valid_eye_color: parts_found +=1 else: print('invalid eye') invalid = True elif", "valid = valid and right[5] in valid_chars valid = valid and right[6] in", "valid = valid and right[4] in valid_chars valid = valid and right[5] in", "print('invalid eye') invalid = True elif left == 'pid': print( 'pid', parts_found) if", "'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd',", "in valid_chars valid = valid and right[6] in valid_chars if valid: parts_found +=", "print(text) print(parts) parts_found = 0 has_cid = False invalid = False for part", "left == 'ecl': print( 'ecl', parts_found) valid_eye_color = ['amb', 'blu', 'brn', 'gry', 'grn',", "left = parts[0] right = parts[1] print(left, right) if left == 'ecl': print(", "4 and (int(right) >= 1920 and int(right) <= 2002): parts_found +=1 else: print('invalid", "== 'hgt': print('hgt', parts_found) num = right[:-2] unit = right[-2:] if unit ==", "'hzl', 'oth'] if right in valid_eye_color: parts_found +=1 else: print('invalid eye') invalid =", "print('invalid cm') invalid = True else: print('invalid unit') invalid = True print(parts_found, invalid)", "True elif left == 'hcl': print( 'hcl', parts_found) valid = True valid_chars =", "= all_text.split('\\n\\n') # print(chuncks) valid_count = 0 # for each chunck split and", "print('invalid iyr') invalid = True elif left == 'cid': has_cid = True elif", "else: print('invalid eyr') invalid = True elif left == 'hcl': print( 'hcl', parts_found)", "invalid = True elif left == 'byr': print( 'byr', parts_found) if len(right) ==", "parts[1] print(left, right) if left == 'ecl': print( 'ecl', parts_found) valid_eye_color = ['amb',", "print( 'hcl', parts_found) valid = True valid_chars = ['0', '1', '2', '3', '4',", "all_text = f.read() chuncks = all_text.split('\\n\\n') # print(chuncks) valid_count = 0 # for", "(int(right) >= 2010 and int(right) <= 2020): parts_found +=1 else: print('invalid iyr') invalid", "invalid = True elif left == 'pid': print( 'pid', parts_found) if len(right) ==", "= valid and right[5] in valid_chars valid = valid and right[6] in valid_chars", "right[5] in valid_chars valid = valid and right[6] in valid_chars if valid: parts_found", "<= 2002): parts_found +=1 else: print('invalid byr') invalid = True elif left ==", "= text.split() print(text) print(parts) parts_found = 0 has_cid = False invalid = False", "part in 
parts: parts = part.split(':') left = parts[0] right = parts[1] print(left,", "= ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',", "int(right) <= 2030): parts_found +=1 else: print('invalid eyr') invalid = True elif left", "as f: all_text = f.read() chuncks = all_text.split('\\n\\n') # print(chuncks) valid_count = 0", "parts = text.split() print(text) print(parts) parts_found = 0 has_cid = False invalid =", "4 and (int(right) >= 2020 and int(right) <= 2030): parts_found +=1 else: print('invalid", "left == 'hgt': print('hgt', parts_found) num = right[:-2] unit = right[-2:] if unit", "elif left == 'hcl': print( 'hcl', parts_found) valid = True valid_chars = ['0',", "'cid': has_cid = True elif left == 'hgt': print('hgt', parts_found) num = right[:-2]", "'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'] if right in valid_eye_color: parts_found +=1 else:", "text in chuncks: parts = text.split() print(text) print(parts) parts_found = 0 has_cid =", "and right[1] in valid_chars valid = valid and right[2] in valid_chars valid =", "parts_found) if len(right) == 4 and (int(right) >= 2020 and int(right) <= 2030):", "= valid and right[6] in valid_chars if valid: parts_found += 1 else: print('invalid", "2010 and int(right) <= 2020): parts_found +=1 else: print('invalid iyr') invalid = True", "+=1 else: print('invalid pid') invalid = True elif left == 'eyr': print( 'eyr',", "True elif left == 'iyr': print( 'iyr', parts_found) if len(right) == 4 and", ">= 150 and int(num) <=193: parts_found +=1 else: print('invalid cm') invalid = True", "'oth'] if right in valid_eye_color: parts_found +=1 else: print('invalid eye') invalid = True", "<reponame>CalebRoberts65101/AdventOfCode with open('python\\\\2020\\day4\\data.txt') as f: all_text = f.read() chuncks = all_text.split('\\n\\n') # print(chuncks)", "59 and int(num) <= 76: parts_found +=1 else: print('invalid cm') invalid = True", "elif unit == 'in': if int(num) >= 59 and int(num) <= 76: parts_found", "else: print('invalid byr') invalid = True elif left == 'iyr': print( 'iyr', parts_found)", "in valid_chars valid = valid and right[2] in valid_chars valid = valid and", "and right[6] in valid_chars if valid: parts_found += 1 else: print('invalid hcl') invalid", "if len(right) == 4 and (int(right) >= 1920 and int(right) <= 2002): parts_found", "print('hgt', parts_found) num = right[:-2] unit = right[-2:] if unit == 'cm': if", "True valid_chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',", "'c', 'd', 'e', 'f'] valid = valid and right[0] == '#' valid =", "= valid and right[3] in valid_chars valid = valid and right[4] in valid_chars", "== 'iyr': print( 'iyr', parts_found) if len(right) == 4 and (int(right) >= 2010", "in valid_chars if valid: parts_found += 1 else: print('invalid hcl') invalid = True", "'brn', 'gry', 'grn', 'hzl', 'oth'] if right in valid_eye_color: parts_found +=1 else: print('invalid", "'hcl': print( 'hcl', parts_found) valid = True valid_chars = ['0', '1', '2', '3',", "== 'eyr': print( 'eyr', parts_found) if len(right) == 4 and (int(right) >= 2020", "chuncks = all_text.split('\\n\\n') # print(chuncks) valid_count = 0 # for each chunck split", "and right[5] in valid_chars valid = valid and right[6] in valid_chars if valid:", "if len(right) == 4 and (int(right) >= 2010 and int(right) <= 2020): parts_found", "valid_chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a',", "and (int(right) >= 1920 and int(right) <= 2002): parts_found +=1 else: print('invalid byr')", "== 'ecl': print( 'ecl', parts_found) valid_eye_color = ['amb', 
'blu', 'brn', 'gry', 'grn', 'hzl',", "+=1 else: print('invalid cm') invalid = True else: print('invalid unit') invalid = True", "byr') invalid = True elif left == 'iyr': print( 'iyr', parts_found) if len(right)", "True elif left == 'hgt': print('hgt', parts_found) num = right[:-2] unit = right[-2:]", "right[6] in valid_chars if valid: parts_found += 1 else: print('invalid hcl') invalid =", "<= 2020): parts_found +=1 else: print('invalid iyr') invalid = True elif left ==", "'gry', 'grn', 'hzl', 'oth'] if right in valid_eye_color: parts_found +=1 else: print('invalid eye')", "pid') invalid = True elif left == 'eyr': print( 'eyr', parts_found) if len(right)", "else: print('invalid iyr') invalid = True elif left == 'cid': has_cid = True", "parts_found = 0 has_cid = False invalid = False for part in parts:", "= valid and right[0] == '#' valid = valid and right[1] in valid_chars", "valid = valid and right[1] in valid_chars valid = valid and right[2] in", "else: print('invalid hcl') invalid = True elif left == 'byr': print( 'byr', parts_found)", "has_cid = True elif left == 'hgt': print('hgt', parts_found) num = right[:-2] unit", "<= 76: parts_found +=1 else: print('invalid cm') invalid = True else: print('invalid unit')", "print('invalid unit') invalid = True print(parts_found, invalid) if parts_found == 7 and not", "with open('python\\\\2020\\day4\\data.txt') as f: all_text = f.read() chuncks = all_text.split('\\n\\n') # print(chuncks) valid_count", "right) if left == 'ecl': print( 'ecl', parts_found) valid_eye_color = ['amb', 'blu', 'brn',", "= parts[1] print(left, right) if left == 'ecl': print( 'ecl', parts_found) valid_eye_color =", "== 'cm': if int(num) >= 150 and int(num) <=193: parts_found +=1 else: print('invalid", "<=193: parts_found +=1 else: print('invalid cm') invalid = True elif unit == 'in':", "invalid = False for part in parts: parts = part.split(':') left = parts[0]", "if len(right) == 9: parts_found +=1 else: print('invalid pid') invalid = True elif", "else: print('invalid pid') invalid = True elif left == 'eyr': print( 'eyr', parts_found)", "['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c',", "== '#' valid = valid and right[1] in valid_chars valid = valid and", "left == 'cid': has_cid = True elif left == 'hgt': print('hgt', parts_found) num", "2020 and int(right) <= 2030): parts_found +=1 else: print('invalid eyr') invalid = True", "else: print('invalid cm') invalid = True elif unit == 'in': if int(num) >=", "= f.read() chuncks = all_text.split('\\n\\n') # print(chuncks) valid_count = 0 # for each", "= right[-2:] if unit == 'cm': if int(num) >= 150 and int(num) <=193:", "== 'pid': print( 'pid', parts_found) if len(right) == 9: parts_found +=1 else: print('invalid", "'#' valid = valid and right[1] in valid_chars valid = valid and right[2]", "invalid) if parts_found == 7 and not invalid: valid_count+=1 print(\"valid passports:\", valid_count) #", "'9', 'a', 'b', 'c', 'd', 'e', 'f'] valid = valid and right[0] ==", "= parts[0] right = parts[1] print(left, right) if left == 'ecl': print( 'ecl',", "in valid_chars valid = valid and right[4] in valid_chars valid = valid and", "invalid = True else: print('invalid unit') invalid = True print(parts_found, invalid) if parts_found", "cm') invalid = True elif unit == 'in': if int(num) >= 59 and", "valid_chars valid = valid and right[3] in valid_chars valid = valid and right[4]", "valid_count = 0 # for each chunck split and check if valid for", "else: print('invalid eye') invalid = True elif left == 'pid': print( 'pid', 
parts_found)", "each chunck split and check if valid for text in chuncks: parts =", "== 7 and not invalid: valid_count+=1 print(\"valid passports:\", valid_count) # end part 1", "int(num) >= 150 and int(num) <=193: parts_found +=1 else: print('invalid cm') invalid =", "1920 and int(right) <= 2002): parts_found +=1 else: print('invalid byr') invalid = True", "+=1 else: print('invalid iyr') invalid = True elif left == 'cid': has_cid =", "valid: parts_found += 1 else: print('invalid hcl') invalid = True elif left ==", "and (int(right) >= 2010 and int(right) <= 2020): parts_found +=1 else: print('invalid iyr')", "= True elif left == 'byr': print( 'byr', parts_found) if len(right) == 4", "print(parts_found, invalid) if parts_found == 7 and not invalid: valid_count+=1 print(\"valid passports:\", valid_count)", "elif left == 'cid': has_cid = True elif left == 'hgt': print('hgt', parts_found)", "right[2] in valid_chars valid = valid and right[3] in valid_chars valid = valid", "valid_chars valid = valid and right[2] in valid_chars valid = valid and right[3]", "valid = True valid_chars = ['0', '1', '2', '3', '4', '5', '6', '7',", "4 and (int(right) >= 2010 and int(right) <= 2020): parts_found +=1 else: print('invalid", "'byr': print( 'byr', parts_found) if len(right) == 4 and (int(right) >= 1920 and", "'a', 'b', 'c', 'd', 'e', 'f'] valid = valid and right[0] == '#'", "valid and right[3] in valid_chars valid = valid and right[4] in valid_chars valid", "invalid = True print(parts_found, invalid) if parts_found == 7 and not invalid: valid_count+=1", "parts_found) if len(right) == 4 and (int(right) >= 1920 and int(right) <= 2002):", "valid = valid and right[3] in valid_chars valid = valid and right[4] in", "int(num) <= 76: parts_found +=1 else: print('invalid cm') invalid = True else: print('invalid", "valid and right[4] in valid_chars valid = valid and right[5] in valid_chars valid", "and right[0] == '#' valid = valid and right[1] in valid_chars valid =", "= part.split(':') left = parts[0] right = parts[1] print(left, right) if left ==", "= True elif left == 'eyr': print( 'eyr', parts_found) if len(right) == 4", "'byr', parts_found) if len(right) == 4 and (int(right) >= 1920 and int(right) <=", "int(num) <=193: parts_found +=1 else: print('invalid cm') invalid = True elif unit ==", "for each chunck split and check if valid for text in chuncks: parts", "elif left == 'byr': print( 'byr', parts_found) if len(right) == 4 and (int(right)", "'f'] valid = valid and right[0] == '#' valid = valid and right[1]", "= True elif left == 'hgt': print('hgt', parts_found) num = right[:-2] unit =", "right[-2:] if unit == 'cm': if int(num) >= 150 and int(num) <=193: parts_found", "+=1 else: print('invalid eyr') invalid = True elif left == 'hcl': print( 'hcl',", "elif left == 'hgt': print('hgt', parts_found) num = right[:-2] unit = right[-2:] if", "right[3] in valid_chars valid = valid and right[4] in valid_chars valid = valid", "'d', 'e', 'f'] valid = valid and right[0] == '#' valid = valid", "150 and int(num) <=193: parts_found +=1 else: print('invalid cm') invalid = True elif", "parts = part.split(':') left = parts[0] right = parts[1] print(left, right) if left", "if unit == 'cm': if int(num) >= 150 and int(num) <=193: parts_found +=1", "left == 'pid': print( 'pid', parts_found) if len(right) == 9: parts_found +=1 else:", "== 'in': if int(num) >= 59 and int(num) <= 76: parts_found +=1 else:", "0 # for each chunck split and check if valid for text in", "parts_found +=1 else: print('invalid byr') invalid = True elif left 
== 'iyr': print(", "and check if valid for text in chuncks: parts = text.split() print(text) print(parts)", "'pid', parts_found) if len(right) == 9: parts_found +=1 else: print('invalid pid') invalid =", "print('invalid cm') invalid = True elif unit == 'in': if int(num) >= 59", "'3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']", "# for each chunck split and check if valid for text in chuncks:", "print(parts) parts_found = 0 has_cid = False invalid = False for part in", "valid and right[5] in valid_chars valid = valid and right[6] in valid_chars if", "= True elif left == 'pid': print( 'pid', parts_found) if len(right) == 9:", "if left == 'ecl': print( 'ecl', parts_found) valid_eye_color = ['amb', 'blu', 'brn', 'gry',", "print('invalid eyr') invalid = True elif left == 'hcl': print( 'hcl', parts_found) valid" ]
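# A more compact, table-driven equivalent of the if/elif chain above, as a
# sketch: one validator per required field (cid stays optional, so it is
# absent). The pid rule deliberately mirrors the length-only check used
# above rather than the stricter all-digits puzzle rule; passport_is_valid
# is a hypothetical helper name, not from the original.
import re

VALIDATORS = {
    'byr': lambda v: v.isdigit() and len(v) == 4 and 1920 <= int(v) <= 2002,
    'iyr': lambda v: v.isdigit() and len(v) == 4 and 2010 <= int(v) <= 2020,
    'eyr': lambda v: v.isdigit() and len(v) == 4 and 2020 <= int(v) <= 2030,
    'hgt': lambda v: v[:-2].isdigit() and (
        (v.endswith('cm') and 150 <= int(v[:-2]) <= 193) or
        (v.endswith('in') and 59 <= int(v[:-2]) <= 76)),
    'hcl': lambda v: re.fullmatch(r'#[0-9a-f]{6}', v) is not None,
    'ecl': lambda v: v in {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'},
    'pid': lambda v: len(v) == 9,
}


def passport_is_valid(text):
    # 'key:value' pairs separated by whitespace, exactly as parsed above
    fields = dict(part.split(':', 1) for part in text.split())
    return all(key in fields and ok(fields[key]) for key, ok in VALIDATORS.items())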
[ "python # -*- coding: utf-8 -*- import sys import zipfile def make_nvz_main(output_file, nvm_file,", "nvm_file = 'outputs/target.nvm' target_file = 'outputs/target.pb' pitch_file = 'outputs/pitch.pb' if len(sys.argv) == 5:", "'outputs/target.pb' pitch_file = 'outputs/pitch.pb' if len(sys.argv) == 5: nvm_file = sys.argv[2] target_file =", "pitch_file = 'outputs/pitch.pb' if len(sys.argv) == 5: nvm_file = sys.argv[2] target_file = sys.argv[3]", "output_file = sys.argv[1] nvm_file = 'outputs/target.nvm' target_file = 'outputs/target.pb' pitch_file = 'outputs/pitch.pb' if", "if len(sys.argv) > 1: output_file = sys.argv[1] nvm_file = 'outputs/target.nvm' target_file = 'outputs/target.pb'", "pitch_file is not None: files = [nvm_file, target_file, pitch_file] arc_names = ['target.nvm', 'target.pb',", "import sys import zipfile def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None): if pitch_file is not", "in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__ == '__main__': output_file = 'outputs/target.nvz' if len(sys.argv)", "range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__ == '__main__': output_file = 'outputs/target.nvz' if len(sys.argv) >", "sys.argv[1] nvm_file = 'outputs/target.nvm' target_file = 'outputs/target.pb' pitch_file = 'outputs/pitch.pb' if len(sys.argv) ==", "utf-8 -*- import sys import zipfile def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None): if pitch_file", "zipfile def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None): if pitch_file is not None: files =", "target_file = 'outputs/target.pb' pitch_file = 'outputs/pitch.pb' if len(sys.argv) == 5: nvm_file = sys.argv[2]", "# -*- coding: utf-8 -*- import sys import zipfile def make_nvz_main(output_file, nvm_file, target_file,", "else: files = [nvm_file, target_file] arc_names = ['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED)", "if pitch_file is not None: files = [nvm_file, target_file, pitch_file] arc_names = ['target.nvm',", "'outputs/target.nvz' if len(sys.argv) > 1: output_file = sys.argv[1] nvm_file = 'outputs/target.nvm' target_file =", "None: files = [nvm_file, target_file, pitch_file] arc_names = ['target.nvm', 'target.pb', 'pitch.pb'] else: files", "nvm_file = sys.argv[2] target_file = sys.argv[3] pitch_file = sys.argv[4] make_nvz_main(output_file, nvm_file, target_file, pitch_file)", "[nvm_file, target_file] arc_names = ['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip: for", "== '__main__': output_file = 'outputs/target.nvz' if len(sys.argv) > 1: output_file = sys.argv[1] nvm_file", "== 5: nvm_file = sys.argv[2] target_file = sys.argv[3] pitch_file = sys.argv[4] make_nvz_main(output_file, nvm_file,", "not None: files = [nvm_file, target_file, pitch_file] arc_names = ['target.nvm', 'target.pb', 'pitch.pb'] else:", "'w', compression=zipfile.ZIP_DEFLATED) as new_zip: for file_loop in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__ ==", "__name__ == '__main__': output_file = 'outputs/target.nvz' if len(sys.argv) > 1: output_file = sys.argv[1]", "output_file = 'outputs/target.nvz' if len(sys.argv) > 1: output_file = sys.argv[1] nvm_file = 'outputs/target.nvm'", "import zipfile def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None): if pitch_file is not None: files", "sys import zipfile def 
make_nvz_main(output_file, nvm_file, target_file, pitch_file=None): if pitch_file is not None:", "[nvm_file, target_file, pitch_file] arc_names = ['target.nvm', 'target.pb', 'pitch.pb'] else: files = [nvm_file, target_file]", "compression=zipfile.ZIP_DEFLATED) as new_zip: for file_loop in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__ == '__main__':", "= [nvm_file, target_file, pitch_file] arc_names = ['target.nvm', 'target.pb', 'pitch.pb'] else: files = [nvm_file,", "for file_loop in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__ == '__main__': output_file = 'outputs/target.nvz'", "files = [nvm_file, target_file] arc_names = ['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as", "= 'outputs/target.nvm' target_file = 'outputs/target.pb' pitch_file = 'outputs/pitch.pb' if len(sys.argv) == 5: nvm_file", "#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import zipfile def make_nvz_main(output_file,", "zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip: for file_loop in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__", "target_file, pitch_file=None): if pitch_file is not None: files = [nvm_file, target_file, pitch_file] arc_names", "arc_names = ['target.nvm', 'target.pb', 'pitch.pb'] else: files = [nvm_file, target_file] arc_names = ['target.nvm',", "len(sys.argv) > 1: output_file = sys.argv[1] nvm_file = 'outputs/target.nvm' target_file = 'outputs/target.pb' pitch_file", "new_zip: for file_loop in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__ == '__main__': output_file =", "arcname=arc_names[file_loop]) if __name__ == '__main__': output_file = 'outputs/target.nvz' if len(sys.argv) > 1: output_file", "= sys.argv[1] nvm_file = 'outputs/target.nvm' target_file = 'outputs/target.pb' pitch_file = 'outputs/pitch.pb' if len(sys.argv)", "= [nvm_file, target_file] arc_names = ['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip:", "if __name__ == '__main__': output_file = 'outputs/target.nvz' if len(sys.argv) > 1: output_file =", "1: output_file = sys.argv[1] nvm_file = 'outputs/target.nvm' target_file = 'outputs/target.pb' pitch_file = 'outputs/pitch.pb'", "-*- coding: utf-8 -*- import sys import zipfile def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None):", "'target.pb', 'pitch.pb'] else: files = [nvm_file, target_file] arc_names = ['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file,", "def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None): if pitch_file is not None: files = [nvm_file,", "'pitch.pb'] else: files = [nvm_file, target_file] arc_names = ['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file, 'w',", "make_nvz_main(output_file, nvm_file, target_file, pitch_file=None): if pitch_file is not None: files = [nvm_file, target_file,", "['target.nvm', 'target.pb', 'pitch.pb'] else: files = [nvm_file, target_file] arc_names = ['target.nvm', 'target.pb'] with", "target_file] arc_names = ['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip: for file_loop", "is not None: files = [nvm_file, target_file, pitch_file] arc_names = ['target.nvm', 'target.pb', 'pitch.pb']", "pitch_file=None): if pitch_file is not None: files = [nvm_file, target_file, 
pitch_file] arc_names =", "'outputs/pitch.pb' if len(sys.argv) == 5: nvm_file = sys.argv[2] target_file = sys.argv[3] pitch_file =", "arc_names = ['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip: for file_loop in", "new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__ == '__main__': output_file = 'outputs/target.nvz' if len(sys.argv) > 1:", "if len(sys.argv) == 5: nvm_file = sys.argv[2] target_file = sys.argv[3] pitch_file = sys.argv[4]", "= ['target.nvm', 'target.pb', 'pitch.pb'] else: files = [nvm_file, target_file] arc_names = ['target.nvm', 'target.pb']", "files = [nvm_file, target_file, pitch_file] arc_names = ['target.nvm', 'target.pb', 'pitch.pb'] else: files =", "as new_zip: for file_loop in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__ == '__main__': output_file", "'outputs/target.nvm' target_file = 'outputs/target.pb' pitch_file = 'outputs/pitch.pb' if len(sys.argv) == 5: nvm_file =", "'__main__': output_file = 'outputs/target.nvz' if len(sys.argv) > 1: output_file = sys.argv[1] nvm_file =", "= 'outputs/target.pb' pitch_file = 'outputs/pitch.pb' if len(sys.argv) == 5: nvm_file = sys.argv[2] target_file", "> 1: output_file = sys.argv[1] nvm_file = 'outputs/target.nvm' target_file = 'outputs/target.pb' pitch_file =", "5: nvm_file = sys.argv[2] target_file = sys.argv[3] pitch_file = sys.argv[4] make_nvz_main(output_file, nvm_file, target_file,", "coding: utf-8 -*- import sys import zipfile def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None): if", "['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip: for file_loop in range(len(arc_names)): new_zip.write(files[file_loop],", "file_loop in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if __name__ == '__main__': output_file = 'outputs/target.nvz' if", "= ['target.nvm', 'target.pb'] with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip: for file_loop in range(len(arc_names)):", "with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip: for file_loop in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop]) if", "= 'outputs/target.nvz' if len(sys.argv) > 1: output_file = sys.argv[1] nvm_file = 'outputs/target.nvm' target_file", "nvm_file, target_file, pitch_file=None): if pitch_file is not None: files = [nvm_file, target_file, pitch_file]", "len(sys.argv) == 5: nvm_file = sys.argv[2] target_file = sys.argv[3] pitch_file = sys.argv[4] make_nvz_main(output_file,", "-*- import sys import zipfile def make_nvz_main(output_file, nvm_file, target_file, pitch_file=None): if pitch_file is", "target_file, pitch_file] arc_names = ['target.nvm', 'target.pb', 'pitch.pb'] else: files = [nvm_file, target_file] arc_names", "= 'outputs/pitch.pb' if len(sys.argv) == 5: nvm_file = sys.argv[2] target_file = sys.argv[3] pitch_file", "'target.pb'] with zipfile.ZipFile(output_file, 'w', compression=zipfile.ZIP_DEFLATED) as new_zip: for file_loop in range(len(arc_names)): new_zip.write(files[file_loop], arcname=arc_names[file_loop])", "pitch_file] arc_names = ['target.nvm', 'target.pb', 'pitch.pb'] else: files = [nvm_file, target_file] arc_names =" ]
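# A quick usage sketch for make_nvz_main; the file names below are illustrative
# and assume the inputs already exist on disk:
#
#   python make_nvz.py out.nvz scene.nvm scene.pb scene_pitch.pb
#
# Programmatically, the resulting archive can be checked with zipfile:
import zipfile

make_nvz_main('demo.nvz', 'demo.nvm', 'demo.pb')   # pitch file omitted
with zipfile.ZipFile('demo.nvz') as z:
    print(z.namelist())                            # ['target.nvm', 'target.pb']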
[ "remove_case_from_db @click.command('remove') @click.argument('case_id') @click.pass_context def remove_command(context, case_id): \"\"\" Deletes case from mutacc DB", "def remove_command(context, case_id): \"\"\" Deletes case from mutacc DB \"\"\" adapter = context.obj['adapter']", "import remove_case_from_db @click.command('remove') @click.argument('case_id') @click.pass_context def remove_command(context, case_id): \"\"\" Deletes case from mutacc", "remove_command(context, case_id): \"\"\" Deletes case from mutacc DB \"\"\" adapter = context.obj['adapter'] remove_case_from_db(adapter,", "@click.argument('case_id') @click.pass_context def remove_command(context, case_id): \"\"\" Deletes case from mutacc DB \"\"\" adapter", "case_id): \"\"\" Deletes case from mutacc DB \"\"\" adapter = context.obj['adapter'] remove_case_from_db(adapter, case_id)", "@click.pass_context def remove_command(context, case_id): \"\"\" Deletes case from mutacc DB \"\"\" adapter =", "mutacc.mutaccDB.remove_case import remove_case_from_db @click.command('remove') @click.argument('case_id') @click.pass_context def remove_command(context, case_id): \"\"\" Deletes case from", "click from mutacc.mutaccDB.remove_case import remove_case_from_db @click.command('remove') @click.argument('case_id') @click.pass_context def remove_command(context, case_id): \"\"\" Deletes", "@click.command('remove') @click.argument('case_id') @click.pass_context def remove_command(context, case_id): \"\"\" Deletes case from mutacc DB \"\"\"", "import click from mutacc.mutaccDB.remove_case import remove_case_from_db @click.command('remove') @click.argument('case_id') @click.pass_context def remove_command(context, case_id): \"\"\"", "from mutacc.mutaccDB.remove_case import remove_case_from_db @click.command('remove') @click.argument('case_id') @click.pass_context def remove_command(context, case_id): \"\"\" Deletes case" ]
[ "\"\" def init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0) return idaapi.PLUGIN_OK def run(self, arg=0): print(\"hell2\") idaapi.msg(\"run()", "# Color the Calls off-white if print_insn_mnem(i) == \"call\": funcCalls.append(i) # Color Anti-VM", "CIC_ITEM import idaapi #idaapi.auto_wait() PLUGIN_TEST = 1 class FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL comment", "= [] for i in heads: # Color the Calls off-white if print_insn_mnem(i)", "\"smsw\", \"str\", \"in\", \"cpuid\"): antiVM.append(i) # Color non-zeroing out xor instructions Orange elif", "xor.append(i) print(\"Number of calls: %d\" % (len(funcCalls))) for i in funcCalls: set_color(i, CIC_ITEM,", "print(\"hell2\") idaapi.msg(\"run() called with %d!\\n\" % arg) heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls =", "% (len(funcCalls))) for i in funcCalls: set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number of potential Anti-VM", "for help :) from __future__ import print_function from idautils import Heads from idc", "0 SILENT # thanks @JR0driguezB for help :) from __future__ import print_function from", "thanks @JR0driguezB for help :) from __future__ import print_function from idautils import Heads", "idautils import Heads from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM", "heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls = [] xor = [] antiVM = []", "# http://www.hexblog.com/?p=120 # Default IDA Pro Paths: # MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ #", "#idaapi.auto_wait() PLUGIN_TEST = 1 class FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL comment = \"Set colors", "FullColor.py 0 0 SILENT # thanks @JR0driguezB for help :) from __future__ import", "help :) from __future__ import print_function from idautils import Heads from idc import", "1 class FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL comment = \"Set colors :)\" help =", "Orange elif print_insn_mnem(i) == \"xor\" and (print_operand(i,0) != print_operand(i,1)): xor.append(i) print(\"Number of calls:", "Paths: # MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\\Program Files (x86)\\IDA X\\plugins #", "antiVM: print(\"Anti-VM potential at %x\" % i) set_color(i, CIC_ITEM, 0x0000ff) print(\"Number of xor:", "idaapi #idaapi.auto_wait() PLUGIN_TEST = 1 class FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL comment = \"Set", "(x86)\\IDA X\\plugins # to make it autoexec on openfile # add this to", "\"Set colors :)\" help = \"No help needed\" wanted_name = \"FullColor\" wanted_hotkey =", "\"call\": funcCalls.append(i) # Color Anti-VM instructions Red and print their location elif print_insn_mnem(i)", "MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\\Program Files (x86)\\IDA X\\plugins # to make", "return FullColor_t() if PLUGIN_TEST: # Create form f = PLUGIN_ENTRY() f.init() f.run() f.term()", "potential Anti-VM instructions: %d\" % (len(antiVM))) for i in antiVM: print(\"Anti-VM potential at", "make it autoexec on openfile # add this to plugins.cfg # ; Other", "antiVM = [] for i in heads: # Color the Calls off-white if", "get_screen_ea, print_operand, set_color, CIC_ITEM import idaapi #idaapi.auto_wait() PLUGIN_TEST = 1 class FullColor_t(idaapi.plugin_t): flags", "help = \"No help needed\" wanted_name = \"FullColor\" wanted_hotkey = \"\" def init(self):", "class FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL 
comment = \"Set colors :)\" help = \"No", "set_color, CIC_ITEM import idaapi #idaapi.auto_wait() PLUGIN_TEST = 1 class FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL", "the Calls off-white if print_insn_mnem(i) == \"call\": funcCalls.append(i) # Color Anti-VM instructions Red", "utf-8 # http://www.hexblog.com/?p=120 # Default IDA Pro Paths: # MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/", "i in antiVM: print(\"Anti-VM potential at %x\" % i) set_color(i, CIC_ITEM, 0x0000ff) print(\"Number", "Default IDA Pro Paths: # MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\\Program Files", "flags = idaapi.PLUGIN_UNL comment = \"Set colors :)\" help = \"No help needed\"", "def term(self): idaapi.msg(\"term() called!\\n\") def PLUGIN_ENTRY(): return FullColor_t() if PLUGIN_TEST: # Create form", "\"str\", \"in\", \"cpuid\"): antiVM.append(i) # Color non-zeroing out xor instructions Orange elif print_insn_mnem(i)", "wanted_name = \"FullColor\" wanted_hotkey = \"\" def init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0) return idaapi.PLUGIN_OK", "funcCalls.append(i) # Color Anti-VM instructions Red and print their location elif print_insn_mnem(i) in", "their location elif print_insn_mnem(i) in (\"sidt\", \"sgdt\", \"sldt\", \"smsw\", \"str\", \"in\", \"cpuid\"): antiVM.append(i)", "at %x\" % i) set_color(i, CIC_ITEM, 0x0000ff) print(\"Number of xor: %d\" % (len(xor)))", "CIC_ITEM, 0x00a5ff) def term(self): idaapi.msg(\"term() called!\\n\") def PLUGIN_ENTRY(): return FullColor_t() if PLUGIN_TEST: #", "= \"FullColor\" wanted_hotkey = \"\" def init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0) return idaapi.PLUGIN_OK def", "= [] antiVM = [] for i in heads: # Color the Calls", "of xor: %d\" % (len(xor))) for i in xor: set_color(i, CIC_ITEM, 0x00a5ff) def", "idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM import idaapi #idaapi.auto_wait() PLUGIN_TEST", "instructions Orange elif print_insn_mnem(i) == \"xor\" and (print_operand(i,0) != print_operand(i,1)): xor.append(i) print(\"Number of", "\"cpuid\"): antiVM.append(i) # Color non-zeroing out xor instructions Orange elif print_insn_mnem(i) == \"xor\"", "print(\"Anti-VM potential at %x\" % i) set_color(i, CIC_ITEM, 0x0000ff) print(\"Number of xor: %d\"", "xor: set_color(i, CIC_ITEM, 0x00a5ff) def term(self): idaapi.msg(\"term() called!\\n\") def PLUGIN_ENTRY(): return FullColor_t() if", "openfile # add this to plugins.cfg # ; Other plugins #FullColor FullColor.py 0", "% (len(antiVM))) for i in antiVM: print(\"Anti-VM potential at %x\" % i) set_color(i,", "% i) set_color(i, CIC_ITEM, 0x0000ff) print(\"Number of xor: %d\" % (len(xor))) for i", "# Color non-zeroing out xor instructions Orange elif print_insn_mnem(i) == \"xor\" and (print_operand(i,0)", "from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM import idaapi #idaapi.auto_wait()", "\"xor\" and (print_operand(i,0) != print_operand(i,1)): xor.append(i) print(\"Number of calls: %d\" % (len(funcCalls))) for", "= Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls = [] xor = [] antiVM = [] for", "funcCalls = [] xor = [] antiVM = [] for i in heads:", "Anti-VM instructions Red and print their location elif print_insn_mnem(i) in (\"sidt\", \"sgdt\", \"sldt\",", "print_insn_mnem(i) == \"xor\" and (print_operand(i,0) != print_operand(i,1)): xor.append(i) print(\"Number of calls: %d\" %", 
"print_operand(i,1)): xor.append(i) print(\"Number of calls: %d\" % (len(funcCalls))) for i in funcCalls: set_color(i,", "Files (x86)\\IDA X\\plugins # to make it autoexec on openfile # add this", "= \"No help needed\" wanted_name = \"FullColor\" wanted_hotkey = \"\" def init(self): #idaapi.msg(\"init()", "0 0 SILENT # thanks @JR0driguezB for help :) from __future__ import print_function", "Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\\Program Files (x86)\\IDA X\\plugins # to make it autoexec", "init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0) return idaapi.PLUGIN_OK def run(self, arg=0): print(\"hell2\") idaapi.msg(\"run() called with", "CIC_ITEM, 0x0000ff) print(\"Number of xor: %d\" % (len(xor))) for i in xor: set_color(i,", "# thanks @JR0driguezB for help :) from __future__ import print_function from idautils import", "PLUGIN_TEST = 1 class FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL comment = \"Set colors :)\"", "\"sgdt\", \"sldt\", \"smsw\", \"str\", \"in\", \"cpuid\"): antiVM.append(i) # Color non-zeroing out xor instructions", "idaapi.msg(\"term() called!\\n\") def PLUGIN_ENTRY(): return FullColor_t() if PLUGIN_TEST: # Create form f =", "in heads: # Color the Calls off-white if print_insn_mnem(i) == \"call\": funcCalls.append(i) #", "\"sldt\", \"smsw\", \"str\", \"in\", \"cpuid\"): antiVM.append(i) # Color non-zeroing out xor instructions Orange", "set_color(i, CIC_ITEM, 0x0000ff) print(\"Number of xor: %d\" % (len(xor))) for i in xor:", "(print_operand(i,0) != print_operand(i,1)): xor.append(i) print(\"Number of calls: %d\" % (len(funcCalls))) for i in", "and print their location elif print_insn_mnem(i) in (\"sidt\", \"sgdt\", \"sldt\", \"smsw\", \"str\", \"in\",", "get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM import idaapi #idaapi.auto_wait() PLUGIN_TEST = 1 class", "import idaapi #idaapi.auto_wait() PLUGIN_TEST = 1 class FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL comment =", "arg=0): print(\"hell2\") idaapi.msg(\"run() called with %d!\\n\" % arg) heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls", "Color Anti-VM instructions Red and print their location elif print_insn_mnem(i) in (\"sidt\", \"sgdt\",", "print(\"Number of calls: %d\" % (len(funcCalls))) for i in funcCalls: set_color(i, CIC_ITEM, 0xc7fdff)", "(len(antiVM))) for i in antiVM: print(\"Anti-VM potential at %x\" % i) set_color(i, CIC_ITEM,", "%d\" % (len(antiVM))) for i in antiVM: print(\"Anti-VM potential at %x\" % i)", "# Color Anti-VM instructions Red and print their location elif print_insn_mnem(i) in (\"sidt\",", "xor instructions Orange elif print_insn_mnem(i) == \"xor\" and (print_operand(i,0) != print_operand(i,1)): xor.append(i) print(\"Number", "[] xor = [] antiVM = [] for i in heads: # Color", "for i in antiVM: print(\"Anti-VM potential at %x\" % i) set_color(i, CIC_ITEM, 0x0000ff)", "from idautils import Heads from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color,", "encoding: utf-8 # http://www.hexblog.com/?p=120 # Default IDA Pro Paths: # MAC /Applications/IDA\\ Pro\\", "[] for i in heads: # Color the Calls off-white if print_insn_mnem(i) ==", "CIC_ITEM, 0xc7fdff) print(\"Number of potential Anti-VM instructions: %d\" % (len(antiVM))) for i in", "0xc7fdff) print(\"Number of potential Anti-VM instructions: %d\" % (len(antiVM))) for i in antiVM:", "i in xor: set_color(i, CIC_ITEM, 0x00a5ff) def term(self): idaapi.msg(\"term() called!\\n\") 
def PLUGIN_ENTRY(): return", "idaapi.PLUGIN_UNL comment = \"Set colors :)\" help = \"No help needed\" wanted_name =", "print their location elif print_insn_mnem(i) in (\"sidt\", \"sgdt\", \"sldt\", \"smsw\", \"str\", \"in\", \"cpuid\"):", "(len(xor))) for i in xor: set_color(i, CIC_ITEM, 0x00a5ff) def term(self): idaapi.msg(\"term() called!\\n\") def", "#idaapi.msg(\"init() called!\\n\") #self.run(0) return idaapi.PLUGIN_OK def run(self, arg=0): print(\"hell2\") idaapi.msg(\"run() called with %d!\\n\"", "import print_function from idautils import Heads from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea,", "print_insn_mnem(i) in (\"sidt\", \"sgdt\", \"sldt\", \"smsw\", \"str\", \"in\", \"cpuid\"): antiVM.append(i) # Color non-zeroing", "print_function from idautils import Heads from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand,", "i in funcCalls: set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number of potential Anti-VM instructions: %d\" %", "import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM import idaapi #idaapi.auto_wait() PLUGIN_TEST =", "this to plugins.cfg # ; Other plugins #FullColor FullColor.py 0 0 SILENT #", "%x\" % i) set_color(i, CIC_ITEM, 0x0000ff) print(\"Number of xor: %d\" % (len(xor))) for", "# add this to plugins.cfg # ; Other plugins #FullColor FullColor.py 0 0", "set_color(i, CIC_ITEM, 0x00a5ff) def term(self): idaapi.msg(\"term() called!\\n\") def PLUGIN_ENTRY(): return FullColor_t() if PLUGIN_TEST:", "and (print_operand(i,0) != print_operand(i,1)): xor.append(i) print(\"Number of calls: %d\" % (len(funcCalls))) for i", "in (\"sidt\", \"sgdt\", \"sldt\", \"smsw\", \"str\", \"in\", \"cpuid\"): antiVM.append(i) # Color non-zeroing out", "# Windows C:\\Program Files (x86)\\IDA X\\plugins # to make it autoexec on openfile", "Anti-VM instructions: %d\" % (len(antiVM))) for i in antiVM: print(\"Anti-VM potential at %x\"", "idaapi.PLUGIN_OK def run(self, arg=0): print(\"hell2\") idaapi.msg(\"run() called with %d!\\n\" % arg) heads =", "run(self, arg=0): print(\"hell2\") idaapi.msg(\"run() called with %d!\\n\" % arg) heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea()))", "for i in funcCalls: set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number of potential Anti-VM instructions: %d\"", "potential at %x\" % i) set_color(i, CIC_ITEM, 0x0000ff) print(\"Number of xor: %d\" %", "; Other plugins #FullColor FullColor.py 0 0 SILENT # thanks @JR0driguezB for help", "idaapi.msg(\"run() called with %d!\\n\" % arg) heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls = []", "def run(self, arg=0): print(\"hell2\") idaapi.msg(\"run() called with %d!\\n\" % arg) heads = Heads(get_segm_start(get_screen_ea()),", "non-zeroing out xor instructions Orange elif print_insn_mnem(i) == \"xor\" and (print_operand(i,0) != print_operand(i,1)):", "C:\\Program Files (x86)\\IDA X\\plugins # to make it autoexec on openfile # add", "in antiVM: print(\"Anti-VM potential at %x\" % i) set_color(i, CIC_ITEM, 0x0000ff) print(\"Number of", "xor = [] antiVM = [] for i in heads: # Color the", "@JR0driguezB for help :) from __future__ import print_function from idautils import Heads from", "[] antiVM = [] for i in heads: # Color the Calls off-white", "to plugins.cfg # ; Other plugins #FullColor FullColor.py 0 0 SILENT # thanks", ":)\" help = \"No help needed\" wanted_name = \"FullColor\" wanted_hotkey = \"\" def", "= [] xor = [] antiVM = [] for i in 
heads: #", "for i in heads: # Color the Calls off-white if print_insn_mnem(i) == \"call\":", "in xor: set_color(i, CIC_ITEM, 0x00a5ff) def term(self): idaapi.msg(\"term() called!\\n\") def PLUGIN_ENTRY(): return FullColor_t()", "term(self): idaapi.msg(\"term() called!\\n\") def PLUGIN_ENTRY(): return FullColor_t() if PLUGIN_TEST: # Create form f", "print_operand, set_color, CIC_ITEM import idaapi #idaapi.auto_wait() PLUGIN_TEST = 1 class FullColor_t(idaapi.plugin_t): flags =", "\"FullColor\" wanted_hotkey = \"\" def init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0) return idaapi.PLUGIN_OK def run(self,", "return idaapi.PLUGIN_OK def run(self, arg=0): print(\"hell2\") idaapi.msg(\"run() called with %d!\\n\" % arg) heads", "location elif print_insn_mnem(i) in (\"sidt\", \"sgdt\", \"sldt\", \"smsw\", \"str\", \"in\", \"cpuid\"): antiVM.append(i) #", "plugins.cfg # ; Other plugins #FullColor FullColor.py 0 0 SILENT # thanks @JR0driguezB", "of calls: %d\" % (len(funcCalls))) for i in funcCalls: set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number", "#self.run(0) return idaapi.PLUGIN_OK def run(self, arg=0): print(\"hell2\") idaapi.msg(\"run() called with %d!\\n\" % arg)", "out xor instructions Orange elif print_insn_mnem(i) == \"xor\" and (print_operand(i,0) != print_operand(i,1)): xor.append(i)", "get_segm_end(get_screen_ea())) funcCalls = [] xor = [] antiVM = [] for i in", "# ; Other plugins #FullColor FullColor.py 0 0 SILENT # thanks @JR0driguezB for", "% arg) heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls = [] xor = [] antiVM", "set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number of potential Anti-VM instructions: %d\" % (len(antiVM))) for i", "# Default IDA Pro Paths: # MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\\Program", "def PLUGIN_ENTRY(): return FullColor_t() if PLUGIN_TEST: # Create form f = PLUGIN_ENTRY() f.init()", "instructions: %d\" % (len(antiVM))) for i in antiVM: print(\"Anti-VM potential at %x\" %", "autoexec on openfile # add this to plugins.cfg # ; Other plugins #FullColor", "antiVM.append(i) # Color non-zeroing out xor instructions Orange elif print_insn_mnem(i) == \"xor\" and", "0x00a5ff) def term(self): idaapi.msg(\"term() called!\\n\") def PLUGIN_ENTRY(): return FullColor_t() if PLUGIN_TEST: # Create", "= \"\" def init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0) return idaapi.PLUGIN_OK def run(self, arg=0): print(\"hell2\")", "to make it autoexec on openfile # add this to plugins.cfg # ;", "instructions Red and print their location elif print_insn_mnem(i) in (\"sidt\", \"sgdt\", \"sldt\", \"smsw\",", "called with %d!\\n\" % arg) heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls = [] xor", "Windows C:\\Program Files (x86)\\IDA X\\plugins # to make it autoexec on openfile #", "\"No help needed\" wanted_name = \"FullColor\" wanted_hotkey = \"\" def init(self): #idaapi.msg(\"init() called!\\n\")", "__future__ import print_function from idautils import Heads from idc import get_segm_start, get_segm_end, print_insn_mnem,", "Red and print their location elif print_insn_mnem(i) in (\"sidt\", \"sgdt\", \"sldt\", \"smsw\", \"str\",", "= idaapi.PLUGIN_UNL comment = \"Set colors :)\" help = \"No help needed\" wanted_name", "# MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\\Program Files (x86)\\IDA X\\plugins # to", "SILENT # thanks @JR0driguezB for help :) from __future__ import print_function from idautils", "% (len(xor))) for 
i in xor: set_color(i, CIC_ITEM, 0x00a5ff) def term(self): idaapi.msg(\"term() called!\\n\")", "IDA Pro Paths: # MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\\Program Files (x86)\\IDA", "# to make it autoexec on openfile # add this to plugins.cfg #", "FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL comment = \"Set colors :)\" help = \"No help", "def init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0) return idaapi.PLUGIN_OK def run(self, arg=0): print(\"hell2\") idaapi.msg(\"run() called", "Heads from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM import idaapi", "Color the Calls off-white if print_insn_mnem(i) == \"call\": funcCalls.append(i) # Color Anti-VM instructions", "(len(funcCalls))) for i in funcCalls: set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number of potential Anti-VM instructions:", "0x0000ff) print(\"Number of xor: %d\" % (len(xor))) for i in xor: set_color(i, CIC_ITEM,", "%d\" % (len(xor))) for i in xor: set_color(i, CIC_ITEM, 0x00a5ff) def term(self): idaapi.msg(\"term()", "on openfile # add this to plugins.cfg # ; Other plugins #FullColor FullColor.py", "called!\\n\") def PLUGIN_ENTRY(): return FullColor_t() if PLUGIN_TEST: # Create form f = PLUGIN_ENTRY()", "\"in\", \"cpuid\"): antiVM.append(i) # Color non-zeroing out xor instructions Orange elif print_insn_mnem(i) ==", "print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM import idaapi #idaapi.auto_wait() PLUGIN_TEST = 1 class FullColor_t(idaapi.plugin_t):", "Calls off-white if print_insn_mnem(i) == \"call\": funcCalls.append(i) # Color Anti-VM instructions Red and", "Other plugins #FullColor FullColor.py 0 0 SILENT # thanks @JR0driguezB for help :)", "plugins #FullColor FullColor.py 0 0 SILENT # thanks @JR0driguezB for help :) from", "with %d!\\n\" % arg) heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls = [] xor =", "funcCalls: set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number of potential Anti-VM instructions: %d\" % (len(antiVM))) for", "calls: %d\" % (len(funcCalls))) for i in funcCalls: set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number of", "= \"Set colors :)\" help = \"No help needed\" wanted_name = \"FullColor\" wanted_hotkey", "called!\\n\") #self.run(0) return idaapi.PLUGIN_OK def run(self, arg=0): print(\"hell2\") idaapi.msg(\"run() called with %d!\\n\" %", "PLUGIN_ENTRY(): return FullColor_t() if PLUGIN_TEST: # Create form f = PLUGIN_ENTRY() f.init() f.run()", "heads: # Color the Calls off-white if print_insn_mnem(i) == \"call\": funcCalls.append(i) # Color", "X\\plugins # to make it autoexec on openfile # add this to plugins.cfg", "elif print_insn_mnem(i) in (\"sidt\", \"sgdt\", \"sldt\", \"smsw\", \"str\", \"in\", \"cpuid\"): antiVM.append(i) # Color", "it autoexec on openfile # add this to plugins.cfg # ; Other plugins", "http://www.hexblog.com/?p=120 # Default IDA Pro Paths: # MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows", "Color non-zeroing out xor instructions Orange elif print_insn_mnem(i) == \"xor\" and (print_operand(i,0) !=", "print(\"Number of xor: %d\" % (len(xor))) for i in xor: set_color(i, CIC_ITEM, 0x00a5ff)", "if print_insn_mnem(i) == \"call\": funcCalls.append(i) # Color Anti-VM instructions Red and print their", "add this to plugins.cfg # ; Other plugins #FullColor FullColor.py 0 0 SILENT", "print_insn_mnem(i) == \"call\": funcCalls.append(i) # Color Anti-VM instructions Red and print their location", 
"#FullColor FullColor.py 0 0 SILENT # thanks @JR0driguezB for help :) from __future__", "Pro Paths: # MAC /Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\\Program Files (x86)\\IDA X\\plugins", "i in heads: # Color the Calls off-white if print_insn_mnem(i) == \"call\": funcCalls.append(i)", "help needed\" wanted_name = \"FullColor\" wanted_hotkey = \"\" def init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0)", "print(\"Number of potential Anti-VM instructions: %d\" % (len(antiVM))) for i in antiVM: print(\"Anti-VM", "comment = \"Set colors :)\" help = \"No help needed\" wanted_name = \"FullColor\"", "from __future__ import print_function from idautils import Heads from idc import get_segm_start, get_segm_end,", ":) from __future__ import print_function from idautils import Heads from idc import get_segm_start,", "import Heads from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM import", "needed\" wanted_name = \"FullColor\" wanted_hotkey = \"\" def init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0) return", "i) set_color(i, CIC_ITEM, 0x0000ff) print(\"Number of xor: %d\" % (len(xor))) for i in", "= 1 class FullColor_t(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL comment = \"Set colors :)\" help", "xor: %d\" % (len(xor))) for i in xor: set_color(i, CIC_ITEM, 0x00a5ff) def term(self):", "colors :)\" help = \"No help needed\" wanted_name = \"FullColor\" wanted_hotkey = \"\"", "%d\" % (len(funcCalls))) for i in funcCalls: set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number of potential", "elif print_insn_mnem(i) == \"xor\" and (print_operand(i,0) != print_operand(i,1)): xor.append(i) print(\"Number of calls: %d\"", "off-white if print_insn_mnem(i) == \"call\": funcCalls.append(i) # Color Anti-VM instructions Red and print", "== \"call\": funcCalls.append(i) # Color Anti-VM instructions Red and print their location elif", "!= print_operand(i,1)): xor.append(i) print(\"Number of calls: %d\" % (len(funcCalls))) for i in funcCalls:", "/Applications/IDA\\ Pro\\ X/idaq.app/Contents/MacOS/plugins/ # Windows C:\\Program Files (x86)\\IDA X\\plugins # to make it", "for i in xor: set_color(i, CIC_ITEM, 0x00a5ff) def term(self): idaapi.msg(\"term() called!\\n\") def PLUGIN_ENTRY():", "== \"xor\" and (print_operand(i,0) != print_operand(i,1)): xor.append(i) print(\"Number of calls: %d\" % (len(funcCalls)))", "get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM import idaapi #idaapi.auto_wait() PLUGIN_TEST = 1", "%d!\\n\" % arg) heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls = [] xor = []", "arg) heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls = [] xor = [] antiVM =", "(\"sidt\", \"sgdt\", \"sldt\", \"smsw\", \"str\", \"in\", \"cpuid\"): antiVM.append(i) # Color non-zeroing out xor", "in funcCalls: set_color(i, CIC_ITEM, 0xc7fdff) print(\"Number of potential Anti-VM instructions: %d\" % (len(antiVM)))", "# encoding: utf-8 # http://www.hexblog.com/?p=120 # Default IDA Pro Paths: # MAC /Applications/IDA\\", "wanted_hotkey = \"\" def init(self): #idaapi.msg(\"init() called!\\n\") #self.run(0) return idaapi.PLUGIN_OK def run(self, arg=0):", "of potential Anti-VM instructions: %d\" % (len(antiVM))) for i in antiVM: print(\"Anti-VM potential", "Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea())) funcCalls = [] xor = [] antiVM = [] for i", "X/idaq.app/Contents/MacOS/plugins/ # 
Windows C:\\Program Files (x86)\\IDA X\\plugins # to make it autoexec on" ]
[ "FILES = os.listdir(MYPATH) INP = '' while INP != 'q': INP = input('q", "else to continue') file_choice = random.choice(FILES) pathname_choice = MYPATH + '/' + file_choice", "enter anything else to continue') file_choice = random.choice(FILES) pathname_choice = MYPATH + '/'", "'./out-of-dropbox-2020-08to12-' FILES = os.listdir(MYPATH) INP = '' while INP != 'q': INP =", "'q': INP = input('q to quit, enter anything else to continue') file_choice =", "INP = input('q to quit, enter anything else to continue') file_choice = random.choice(FILES)", "MYPATH = './out-of-dropbox-2020-08to12-' FILES = os.listdir(MYPATH) INP = '' while INP != 'q':", "to continue') file_choice = random.choice(FILES) pathname_choice = MYPATH + '/' + file_choice subprocess.run([\"open\",", "to quit, enter anything else to continue') file_choice = random.choice(FILES) pathname_choice = MYPATH", "quit, enter anything else to continue') file_choice = random.choice(FILES) pathname_choice = MYPATH +", "= './out-of-dropbox-2020-08to12-' FILES = os.listdir(MYPATH) INP = '' while INP != 'q': INP", "os.listdir(MYPATH) INP = '' while INP != 'q': INP = input('q to quit,", "= '' while INP != 'q': INP = input('q to quit, enter anything", "while INP != 'q': INP = input('q to quit, enter anything else to", "random import subprocess MYPATH = './out-of-dropbox-2020-08to12-' FILES = os.listdir(MYPATH) INP = '' while", "= input('q to quit, enter anything else to continue') file_choice = random.choice(FILES) pathname_choice", "!= 'q': INP = input('q to quit, enter anything else to continue') file_choice", "import random import subprocess MYPATH = './out-of-dropbox-2020-08to12-' FILES = os.listdir(MYPATH) INP = ''", "'' while INP != 'q': INP = input('q to quit, enter anything else", "INP = '' while INP != 'q': INP = input('q to quit, enter", "os import random import subprocess MYPATH = './out-of-dropbox-2020-08to12-' FILES = os.listdir(MYPATH) INP =", "anything else to continue') file_choice = random.choice(FILES) pathname_choice = MYPATH + '/' +", "INP != 'q': INP = input('q to quit, enter anything else to continue')", "= os.listdir(MYPATH) INP = '' while INP != 'q': INP = input('q to", "subprocess MYPATH = './out-of-dropbox-2020-08to12-' FILES = os.listdir(MYPATH) INP = '' while INP !=", "input('q to quit, enter anything else to continue') file_choice = random.choice(FILES) pathname_choice =", "import os import random import subprocess MYPATH = './out-of-dropbox-2020-08to12-' FILES = os.listdir(MYPATH) INP", "import subprocess MYPATH = './out-of-dropbox-2020-08to12-' FILES = os.listdir(MYPATH) INP = '' while INP", "continue') file_choice = random.choice(FILES) pathname_choice = MYPATH + '/' + file_choice subprocess.run([\"open\", pathname_choice])" ]